1 parent 65393ee commit be1e128
tests/lora/test_llama_tp.py
@@ -4,6 +4,8 @@
 import sys
 from typing import Union
 
+import pytest
+
 import vllm
 from vllm import LLM
 from vllm.lora.request import LoRARequest
@@ -149,6 +151,8 @@ def test_llama_lora_tp4_fully_sharded_loras(sql_lora_files):
     generate_and_test(llm, sql_lora_files)
 
 
+@pytest.mark.skip(reason=("Skipping this test as tensorizer is not "
+                          "working with LoRA as of #19619"))
 @multi_gpu_test(num_gpus=2)
 @create_new_process_for_each_test()
 def test_tp2_serialize_and_deserialize_lora(tmp_path, sql_lora_files,
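For context, the change applies pytest's standard unconditional skip marker to the tensorizer-based LoRA test. Below is a minimal sketch of the same pattern in isolation; the test name and body are hypothetical placeholders, not the actual vLLM test.

import pytest

# Skip the test unconditionally and record the reason in the test report.
# The adjacent string literals are ordinary implicit concatenation, used
# only to keep the reason string within the line-length limit.
@pytest.mark.skip(reason=("Skipping this test as tensorizer is not "
                          "working with LoRA as of #19619"))
def test_tensorizer_lora_roundtrip():
    # Hypothetical placeholder body; the real test serializes and
    # deserializes a LoRA-enabled model across two GPUs.
    assert False, "unreachable while the skip marker is applied"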