OpenAI Embedding Client

Source: vllm-project/vllm.
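This example queries vLLM's OpenAI-compatible Embeddings API with the official OpenAI Python client. It assumes a vLLM API server is already running at http://localhost:8000 and serving an embedding model; the 4096-dimensional output noted at the end corresponds to a model such as intfloat/e5-mistral-7b-instruct (an assumption here; any embedding model supported by vLLM works, and the output dimension is determined by that model).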

from openai import OpenAI

# Modify OpenAI's API key and API base to use vLLM's API server.
openai_api_key = "EMPTY"
openai_api_base = "http://localhost:8000/v1"

client = OpenAI(
    # defaults to os.environ.get("OPENAI_API_KEY")
    api_key=openai_api_key,
    base_url=openai_api_base,
)

# Use the first model served by the vLLM server.
models = client.models.list()
model = models.data[0].id

responses = client.embeddings.create(
    input=[
        "Hello my name is",
        "The best thing about vLLM is that it supports many different models",
    ],
    model=model,
    encoding_format="float",
)

for data in responses.data:
    print(data.embedding)  # list of floats of length 4096
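The embeddings returned above can be consumed like any other vector output. As a minimal follow-up sketch (not part of the original example), the snippet below computes the cosine similarity between the two returned embeddings using only the standard library; it assumes responses still holds the result of the client.embeddings.create call from the example.

import math

def cosine_similarity(a, b):
    # Dot product divided by the product of the two vector norms.
    dot = sum(x * y for x, y in zip(a, b))
    norm_a = math.sqrt(sum(x * x for x in a))
    norm_b = math.sqrt(sum(x * x for x in b))
    return dot / (norm_a * norm_b)

embeddings = [data.embedding for data in responses.data]
print(f"Similarity between the two inputs: {cosine_similarity(embeddings[0], embeddings[1]):.4f}")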