diff --git a/docs/models/supported_models.md b/docs/models/supported_models.md
index 4b19272f4a28..7594c6e6fbf1 100644
--- a/docs/models/supported_models.md
+++ b/docs/models/supported_models.md
@@ -404,10 +404,7 @@ Specified using `--task embed`.
 
     You should manually set mean pooling by passing `--override-pooler-config '{"pooling_type": "MEAN"}'`.
 
 !!! note
-    The HF implementation of `Alibaba-NLP/gte-Qwen2-1.5B-instruct` is hardcoded to use causal attention despite what is shown in `config.json`. To compare vLLM vs HF results,
-    you should set `--hf-overrides '{"is_causal": true}'` in vLLM so that the two implementations are consistent with each other.
-
-    For both the 1.5B and 7B variants, you also need to enable `--trust-remote-code` for the correct tokenizer to be loaded.
+    For `Alibaba-NLP/gte-Qwen2-*`, you need to enable `--trust-remote-code` for the correct tokenizer to be loaded. See [relevant issue on HF Transformers](https://github.com/huggingface/transformers/issues/34882).
 
 !!! note
diff --git a/tests/models/language/pooling/test_embedding.py b/tests/models/language/pooling/test_embedding.py
index 9db385e77bdb..a44b2154b137 100644
--- a/tests/models/language/pooling/test_embedding.py
+++ b/tests/models/language/pooling/test_embedding.py
@@ -15,13 +15,12 @@
                      marks=[pytest.mark.core_model, pytest.mark.cpu_model]),
         pytest.param("sentence-transformers/all-MiniLM-L12-v2"),
         pytest.param("intfloat/multilingual-e5-small"),
-        pytest.param("Alibaba-NLP/gte-Qwen2-7B-instruct"),
+        pytest.param("Alibaba-NLP/gte-Qwen2-1.5B-instruct"),
         # [Decoder-only]
         pytest.param("BAAI/bge-multilingual-gemma2",
                      marks=[pytest.mark.core_model]),
         pytest.param("intfloat/e5-mistral-7b-instruct",
                      marks=[pytest.mark.core_model, pytest.mark.cpu_model]),
-        pytest.param("Alibaba-NLP/gte-Qwen2-1.5B-instruct"),
         pytest.param("ssmits/Qwen2-7B-Instruct-embed-base"),
         # [Cross-Encoder]
         pytest.param("sentence-transformers/stsb-roberta-base-v2"),
@@ -47,9 +46,6 @@ def test_models(
         vllm_extra_kwargs["override_pooler_config"] = \
             PoolerConfig(pooling_type="MEAN")
 
-    if model == "Alibaba-NLP/gte-Qwen2-1.5B-instruct":
-        vllm_extra_kwargs["hf_overrides"] = {"is_causal": True}
-
     # The example_prompts has ending "\n", for example:
     # "Write a short story about a robot that dreams for the first time.\n"
     # sentence_transformers will strip the input texts, see:
diff --git a/tests/models/language/pooling/test_gte.py b/tests/models/language/pooling/test_gte.py
index b60d27aaa72b..91d10f529cd6 100644
--- a/tests/models/language/pooling/test_gte.py
+++ b/tests/models/language/pooling/test_gte.py
@@ -45,9 +45,6 @@
     EmbedModelInfo("Alibaba-NLP/gte-Qwen2-1.5B-instruct",
                    architecture="Qwen2ForCausalLM",
                    enable_test=True),
-    EmbedModelInfo("Alibaba-NLP/gte-Qwen2-7B-instruct",
-                   architecture="Qwen2ForCausalLM",
-                   enable_test=False),
     ########## ModernBertModel
     EmbedModelInfo("Alibaba-NLP/gte-modernbert-base",
                    architecture="ModernBertModel",
@@ -61,9 +58,6 @@ def test_models_mteb(hf_runner, vllm_runner,
     from .mteb_utils import mteb_test_embed_models
 
     vllm_extra_kwargs: dict[str, Any] = {}
-    if model_info.name == "Alibaba-NLP/gte-Qwen2-1.5B-instruct":
-        vllm_extra_kwargs["hf_overrides"] = {"is_causal": True}
-
     if model_info.architecture == "GteNewModel":
         vllm_extra_kwargs["hf_overrides"] = {"architectures": ["GteNewModel"]}
 
@@ -81,9 +75,6 @@ def test_models_correctness(hf_runner, vllm_runner, model_info: EmbedModelInfo,
     example_prompts = [str(s).strip() for s in example_prompts]
 
     vllm_extra_kwargs: dict[str, Any] = {}
-    if model_info.name == "Alibaba-NLP/gte-Qwen2-1.5B-instruct":
-        vllm_extra_kwargs["hf_overrides"] = {"is_causal": True}
-
     if model_info.architecture == "GteNewModel":
         vllm_extra_kwargs["hf_overrides"] = {"architectures": ["GteNewModel"]}
 