Skip to content

Commit 28b456c

Browse files
committed
skip rpc tests
Signed-off-by: Yan Chunwei <[email protected]>
1 parent c852836 commit 28b456c

File tree

2 files changed

+4
-0
lines changed

2 files changed

+4
-0
lines changed

tests/unittest/llmapi/test_llm_multi_gpu_pytorch.py

Lines changed: 2 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -62,6 +62,7 @@ def test_llama_7b_multi_lora_tp2():
6262
cuda_graph_config=None)
6363

6464

65+
@pytest.mark.skip(reason="https://nvbugs/5560921")
6566
@skip_ray
6667
@pytest.mark.gpu2
6768
def test_llm_rpc_tp2():
@@ -80,6 +81,7 @@ def test_llm_rpc_tp2():
8081
assert len(res.outputs[0].token_ids) == 10
8182

8283

84+
@pytest.mark.skip(reason="https://nvbugs/5560921")
8385
@skip_ray
8486
@pytest.mark.gpu2
8587
@pytest.mark.asyncio

tests/unittest/llmapi/test_llm_pytorch.py

Lines changed: 2 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -956,6 +956,7 @@ def test_max_num_token_check(self):
956956
llm.generate([ids])
957957

958958

959+
@pytest.mark.skip(reason="https://nvbugs/5560921")
959960
@skip_ray
960961
def test_llm_rpc():
961962
# TODO: remove the with-statement when shutdown hang issue is fixed
@@ -973,6 +974,7 @@ def test_llm_rpc():
973974
assert len(res.outputs[0].token_ids) == 10
974975

975976

977+
@pytest.mark.skip(reason="https://nvbugs/5560921")
976978
@skip_ray
977979
@pytest.mark.asyncio
978980
async def test_llm_rpc_streaming():

0 commit comments

Comments (0)