OpenAI Chat Completion Client
Source: vllm-project/vllm.
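This client script assumes a vLLM OpenAI-compatible API server is already running locally on port 8000, for example started with "vllm serve <model>" (or "python -m vllm.entrypoints.openai.api_server --model <model>" on older versions; adjust the model name for your setup). Start the server first, then run the client below.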
from openai import OpenAI

# Modify OpenAI's API key and API base to use vLLM's API server.
openai_api_key = "EMPTY"
openai_api_base = "http://localhost:8000/v1"

client = OpenAI(
    # Defaults to os.environ.get("OPENAI_API_KEY") if not set explicitly.
    api_key=openai_api_key,
    base_url=openai_api_base,
)

models = client.models.list()
model = models.data[0].id

chat_completion = client.chat.completions.create(
    messages=[
        {"role": "system", "content": "You are a helpful assistant."},
        {"role": "user", "content": "Who won the world series in 2020?"},
        {
            "role": "assistant",
            "content": "The Los Angeles Dodgers won the World Series in 2020.",
        },
        {"role": "user", "content": "Where was it played?"},
    ],
    model=model,
)

print("Chat completion results:")
print(chat_completion)
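The same endpoint also supports streamed responses via the standard OpenAI client. A minimal sketch, reusing the client and model from the listing above (the prompt string here is purely illustrative):

# Stream the response chunk by chunk instead of waiting for the full completion.
stream = client.chat.completions.create(
    messages=[{"role": "user", "content": "Write a haiku about baseball."}],
    model=model,
    stream=True,
)
for chunk in stream:
    # Each chunk carries an incremental delta; content may be None on some chunks.
    if chunk.choices and chunk.choices[0].delta.content is not None:
        print(chunk.choices[0].delta.content, end="", flush=True)
print()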