Offline Inference Arctic

Source: vllm-project/vllm.
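This example runs offline batched inference with the Snowflake Arctic instruct model: it loads snowflake/snowflake-arctic-instruct with DeepSpeed FP (deepspeedfp) quantization, shards it across 8 GPUs with tensor parallelism, and samples one completion for each of four prompts.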

from vllm import LLM, SamplingParams

# Sample prompts.
prompts = [
    "Hello, my name is",
    "The president of the United States is",
    "The capital of France is",
    "The future of AI is",
]
# Create a sampling params object.
sampling_params = SamplingParams(temperature=0.8, top_p=0.95)

# Create an LLM.
llm = LLM(model="snowflake/snowflake-arctic-instruct",
          quantization="deepspeedfp",
          tensor_parallel_size=8,
          trust_remote_code=True)

# Generate texts from the prompts. The output is a list of RequestOutput objects
# that contain the prompt, generated text, and other information.
outputs = llm.generate(prompts, sampling_params)

# Print the outputs.
for output in outputs:
    prompt = output.prompt
    generated_text = output.outputs[0].text
    print(f"Prompt: {prompt!r}, Generated text: {generated_text!r}")
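A small variation (a sketch, not part of the upstream example): SamplingParams can request several candidate completions per prompt via its n argument, and each candidate then appears as one entry in RequestOutput.outputs. The tensor_parallel_size value below assumes the same 8-GPU setup as above; adjust it to your hardware.

from vllm import LLM, SamplingParams

# Request two candidate completions per prompt.
sampling_params = SamplingParams(n=2, temperature=0.8, top_p=0.95)

llm = LLM(model="snowflake/snowflake-arctic-instruct",
          quantization="deepspeedfp",
          tensor_parallel_size=8,  # match the number of available GPUs
          trust_remote_code=True)

outputs = llm.generate(["The capital of France is"], sampling_params)
for output in outputs:
    # output.outputs holds one CompletionOutput per requested candidate.
    for i, candidate in enumerate(output.outputs):
        print(f"Candidate {i}: {candidate.text!r}")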