Commit ac60374

cleanup
1 parent ebc12f1 commit ac60374

1 file changed: 2 additions, 5 deletions

examples/offline_inference.py

Lines changed: 2 additions & 5 deletions
@@ -1,5 +1,4 @@
 from vllm import LLM, SamplingParams
-import torch

 # Sample prompts.
 prompts = [
@@ -9,12 +8,10 @@
     "The future of AI is",
 ]
 # Create a sampling params object.
-sampling_params = SamplingParams(temperature=0.0, top_p=0.95)
+sampling_params = SamplingParams(temperature=0.8, top_p=0.95)

 # Create an LLM.
-#llm = LLM(model="state-spaces/mamba-370m-hf", dtype=torch.float32)
-llm = LLM(model="state-spaces/mamba2-130m", dtype=torch.float32)
-
+llm = LLM(model="facebook/opt-125m")
 # Generate texts from the prompts. The output is a list of RequestOutput objects
 # that contain the prompt, generated text, and other information.
 outputs = llm.generate(prompts, sampling_params)
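
For reference, a minimal sketch of the example as it stands after this commit, reconstructed from the diff context above. The remaining entries of the prompts list and the final printing loop are illustrative assumptions, not part of this diff; the loop uses the standard vLLM RequestOutput fields (prompt and outputs[0].text).

from vllm import LLM, SamplingParams

# Sample prompts. Only "The future of AI is" is visible in the diff;
# any other entries in the original list are elided here.
prompts = [
    "The future of AI is",
]
# Create a sampling params object.
sampling_params = SamplingParams(temperature=0.8, top_p=0.95)

# Create an LLM.
llm = LLM(model="facebook/opt-125m")
# Generate texts from the prompts. The output is a list of RequestOutput objects
# that contain the prompt, generated text, and other information.
outputs = llm.generate(prompts, sampling_params)

# Illustrative addition (not in the diff): print each prompt with its completion.
for output in outputs:
    print(f"Prompt: {output.prompt!r}, Generated: {output.outputs[0].text!r}")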
