Offline Inference With Profiler
Source: vllm-project/vllm.
import os

from vllm import LLM, SamplingParams

# Enable the torch profiler; the output directory can also be set on the command line.
os.environ["VLLM_TORCH_PROFILER_DIR"] = "./vllm_profile"

# Sample prompts.
prompts = [
    "Hello, my name is",
    "The president of the United States is",
    "The capital of France is",
    "The future of AI is",
]
# Create a sampling params object.
sampling_params = SamplingParams(temperature=0.8, top_p=0.95)

# Create an LLM.
llm = LLM(model="facebook/opt-125m", tensor_parallel_size=1)

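# Start the profiler; traces are collected until stop_profile() is called.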
llm.start_profile()

# Generate texts from the prompts. The output is a list of RequestOutput objects
# that contain the prompt, generated text, and other information.
outputs = llm.generate(prompts, sampling_params)

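# Stop the profiler; the collected traces are written to VLLM_TORCH_PROFILER_DIR.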
llm.stop_profile()

# Print the outputs.
for output in outputs:
    prompt = output.prompt
    generated_text = output.outputs[0].text
    print(f"Prompt: {prompt!r}, Generated text: {generated_text!r}")
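
Once the run completes, the profiler output lands in the directory set by VLLM_TORCH_PROFILER_DIR (./vllm_profile above). The snippet below is a minimal sketch for checking what was produced; it assumes the traces are plain files somewhere under that directory and makes no claim about their exact names or format, which can vary between versions.

import os
from pathlib import Path

profile_dir = Path(os.environ.get("VLLM_TORCH_PROFILER_DIR", "./vllm_profile"))

# List whatever the profiler produced. Torch profiler traces are typically
# Chrome-trace JSON files (possibly gzip-compressed), but this is an assumption.
for trace in sorted(p for p in profile_dir.rglob("*") if p.is_file()):
    print(f"{trace} ({trace.stat().st_size / 1e6:.1f} MB)")

The traces can usually be inspected with the PyTorch profiler TensorBoard plugin or a Chrome-trace viewer such as ui.perfetto.dev. Even a short generation can produce large trace files, so it is worth keeping the profiled workload small.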