
Commit 1f7a164 (parent: ebbbacf)

[None][fix] update skip case (#7193)

Authored by crazydemo and LarryXFly
Signed-off-by: Ivy Zhang <[email protected]>
Co-authored-by: Larry <[email protected]>

3 files changed (+3, -3 lines)

tests/integration/defs/accuracy/test_llm_api_pytorch.py (1 addition, 0 deletions)

@@ -2408,6 +2408,7 @@ class TestEXAONE4(LlmapiAccuracyTestHarness):
     kv_cache_config = KvCacheConfig(enable_block_reuse=False,
                                     enable_partial_reuse=False)
 
+    @pytest.mark.skip_less_device_memory(80000)
     def test_auto_dtype(self):
         model_path = f"{llm_models_root()}/EXAONE-4.0-32B"
         with LLM(model_path, kv_cache_config=self.kv_cache_config) as llm:
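For reference, skip_less_device_memory is a custom mark from this repository's test harness. Below is a minimal conftest.py sketch of how such a mark could be enforced, assuming the threshold is expressed in MB and GPUs are queried via pynvml; both are assumptions, and this is not the repository's actual implementation.

# conftest.py -- hypothetical sketch, NOT the actual TensorRT-LLM code.
# Skips a test when any visible GPU has less total memory (in MB) than
# the threshold given to @pytest.mark.skip_less_device_memory(<mb>).
import pytest

try:
    import pynvml  # NVIDIA management library bindings (assumed available)
except ImportError:
    pynvml = None


def _min_total_device_memory_mb():
    """Smallest total memory, in MB, across all visible GPUs."""
    pynvml.nvmlInit()
    try:
        totals = []
        for i in range(pynvml.nvmlDeviceGetCount()):
            handle = pynvml.nvmlDeviceGetHandleByIndex(i)
            info = pynvml.nvmlDeviceGetMemoryInfo(handle)
            totals.append(info.total // (1024 * 1024))
        return min(totals) if totals else 0
    finally:
        pynvml.nvmlShutdown()


def pytest_runtest_setup(item):
    # Honor the mark before the test body runs.
    mark = item.get_closest_marker("skip_less_device_memory")
    if mark is None or pynvml is None:
        return
    required_mb = mark.args[0]
    if _min_total_device_memory_mb() < required_mb:
        pytest.skip(f"needs >= {required_mb} MB per device")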

tests/integration/defs/test_e2e.py (2 additions, 2 deletions)

@@ -2178,15 +2178,15 @@ def test_ptp_quickstart_advanced_8gpus_chunked_prefill_sq_22k(
 # This test is specifically to be run on 2 GPUs on Blackwell RTX 6000 Pro (SM120) architecture
 # TODO: remove once we have a node with 8 GPUs and reuse test_ptp_quickstart_advanced_8gpus
 @skip_no_sm120
-@pytest.mark.skip_less_device_memory(80000)
+@pytest.mark.skip_less_device_memory(160000)
 @pytest.mark.skip_less_device(2)
 @pytest.mark.parametrize("model_name,model_path", [
     ('Nemotron-Super-49B-v1-BF16',
      'nemotron-nas/Llama-3_3-Nemotron-Super-49B-v1'),
     ("Mixtral-8x7B-BF16", "Mixtral-8x7B-Instruct-v0.1"),
     pytest.param('Llama3.1-70B-BF16',
                  'llama-3.1-model/Meta-Llama-3.1-70B',
                  marks=pytest.mark.skip_less_device_memory(95000)),
+                 marks=pytest.mark.skip_less_device_memory(190000)),
 ])
 def test_ptp_quickstart_advanced_2gpus_sm120(llm_root, llm_venv, model_name,
                                              model_path):
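Both thresholds are exactly doubled here (80000 to 160000 for the shared mark, 95000 to 190000 for the Llama3.1-70B parameter set). A plausible reading, not stated in the commit itself, is that the previous values were tuned for the 8-GPU variant of this test, and pinning the run to 2 GPUs leaves a larger model shard, and thus a larger memory footprint, on each device.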

tests/integration/test_lists/qa/llm_function_full.txt (0 additions, 1 deletion)

@@ -451,7 +451,6 @@ accuracy/test_llm_api_pytorch.py::TestLlama3_1_8BInstruct::test_guided_decoding_
 accuracy/test_llm_api_pytorch.py::TestLlama3_3_70BInstruct::test_fp8_tp4
 accuracy/test_llm_api_pytorch.py::TestLlama3_3_70BInstruct::test_nvfp4_tp4
 accuracy/test_llm_api_pytorch.py::TestLlama3_3_70BInstruct::test_fp8_eagle3_tp8[eagle3_one_model=True]
-accuracy/test_llm_api_pytorch.py::TestLlama3_3_70BInstruct::test_fp8_eagle3_tp8[eagle3_one_model=False]
 accuracy/test_llm_api_pytorch.py::TestMistral7B::test_auto_dtype
 accuracy/test_llm_api_pytorch.py::TestGemma3_1BInstruct::test_auto_dtype
 accuracy/test_llm_api_pytorch.py::TestMistralSmall24B::test_auto_dtype
