Skip to content

Commit 334e2ca

Browse files
authored
[https://nvbugs/5542867][fix] Fix the non-determinism issue in the mm_encoder test (#8033)
Signed-off-by: Chang Liu (Enterprise Products) <[email protected]>
1 parent e5f9b6a commit 334e2ca

File tree

1 file changed

+6
-5
lines changed

1 file changed

+6
-5
lines changed

tests/unittest/_torch/multimodal/test_mm_encoder_standalone.py

Lines changed: 6 additions & 5 deletions
Original file line number | Diff line number | Diff line change
@@ -168,7 +168,6 @@ def test_single_image_chat(model_key, multimodal_model_config):
168168
"llava-v1.6-mistral-7b-hf",
169169
])
170170
def test_multi_request_batch_chat(model_key, multimodal_model_config):
171-
pytest.skip("https://nvbugspro.nvidia.com/bug/5542867")
172171
"""Test batching multiple multimodal requests and verify encoder path matches raw path.
173172
174173
This mirrors test_single_image_chat but with a batch of size 3.
@@ -200,10 +199,12 @@ def test_multi_request_batch_chat(model_key, multimodal_model_config):
200199

201200
encoder = MultimodalEncoder(model=encoder_model_dir,
202201
max_batch_size=max_batch_size)
203-
llm = LLM(model=encoder_model_dir,
204-
backend='pytorch',
205-
kv_cache_config=kv_cache_config,
206-
trust_remote_code=True)
202+
llm = LLM(
203+
model=encoder_model_dir,
204+
backend='pytorch',
205+
kv_cache_config=kv_cache_config,
206+
max_batch_size=1, # fix batch size to reduce non-determinism in tests
207+
trust_remote_code=True)
207208

208209
config_path = os.path.join(llm._hf_model_dir, 'config.json')
209210
assert os.path.exists(

0 commit comments

Comments (0)