Commit 5932f84

Author: paulyu (committed)

[Bugfix] fix Mypy issues

Signed-off-by: paulyu <paulyu0307@gmail.com>

1 parent: 7b5db80

2 files changed (+3, -3 lines)

tests/multicard/test_lora_quant_tp.py

Lines changed: 2 additions & 2 deletions

@@ -9,7 +9,7 @@
 def test_quant_model_tp_equality(tinyllama_lora_files, model):
     if model.quantization == "GPTQ":
         pytest.skip("GPTQ lora outputs are just incredibly unstable")
-    with VllmRunner(model=model.model_path,
+    with VllmRunner(model_name=model.model_path,
                     quantization=model.quantization,
                     enable_lora=True,
                     max_loras=4,
@@ -20,7 +20,7 @@ def test_quant_model_tp_equality(tinyllama_lora_files, model):
     del vllm_model_tp1
     cleanup_dist_env_and_memory()
 
-    with VllmRunner(model=model.model_path,
+    with VllmRunner(model_name=model.model_path,
                     quantization=model.quantization,
                     enable_lora=True,
                     max_loras=4,

tests/singlecard/test_lora_quant.py

Lines changed: 1 addition & 1 deletion

@@ -108,7 +108,7 @@ def expect_match(output, expected_output):
     max_tokens = 10
 
     print("creating lora adapter")
-    with VllmRunner(model=model.model_path,
+    with VllmRunner(model_name=model.model_path,
                     quantization=model.quantization,
                     enable_lora=True,
                     max_loras=4,
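
For context on the Mypy errors being fixed: the VllmRunner test helper declares model_name as its first parameter (as in vLLM's tests/conftest.py), so calling it with model= trips Mypy's call-argument checking. A minimal sketch of the mismatch, using a simplified stand-in signature (the real helper takes many more parameters):

# Simplified stand-in for the VllmRunner test helper; only the
# parameter names matter here. The real class lives in the test
# conftest and is assumed to declare model_name, not model.
class VllmRunner:
    def __init__(self,
                 model_name: str,
                 quantization: str | None = None,
                 enable_lora: bool = False,
                 max_loras: int = 1) -> None:
        self.model_name = model_name
        self.quantization = quantization
        self.enable_lora = enable_lora
        self.max_loras = max_loras

# Before the fix, Mypy reports an error along the lines of:
#   Unexpected keyword argument "model" for "VllmRunner"  [call-arg]
# runner = VllmRunner(model="some/model/path")  # rejected by Mypy

# After the fix, the keyword matches the declared parameter name:
runner = VllmRunner(model_name="some/model/path",
                    quantization=None,
                    enable_lora=True,
                    max_loras=4)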
