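"""Tests for PEFTHelper: parsing LoRA adapter_config.json files and
validating them against vLLM's LoRAConfig."""
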
import json
import math
import shutil

import pytest

from vllm.config import LoRAConfig
from vllm.lora.peft_helper import PEFTHelper

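# Each entry: (test name, adapter_config override, expected ValueError
# message substring).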
ERROR_CASES = [
    ("test_rank", {"r": 1024}, "is greater than max_lora_rank"),
    ("test_bias", {"bias": "all"},
     "Adapter bias cannot be used without bias_enabled"),
    ("test_dora", {"use_dora": True}, "does not yet support DoRA"),
    ("test_modules_to_save", {"modules_to_save": ["lm_head"]},
     "only supports modules_to_save being None"),
]


def test_peft_helper_pass(long_context_lora_files_16k_1, tmp_path):
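    """A valid long-context adapter passes validate_legal and exposes the
    fields parsed from adapter_config.json; with use_rslora=True the scaling
    factor becomes lora_alpha / sqrt(r)."""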
    peft_helper = PEFTHelper.from_local_dir(long_context_lora_files_16k_1,
                                            max_position_embeddings=4096)
    lora_config = LoRAConfig(max_lora_rank=16, max_cpu_loras=3, max_loras=2)
    peft_helper.validate_legal(lora_config)
    assert peft_helper.r == 8
    assert peft_helper.lora_alpha == 16
    assert peft_helper.target_modules == [
        "q_proj",
        "v_proj",
        "k_proj",
        "o_proj",
        "gate_proj",
        "up_proj",
        "down_proj",
        "embed_tokens",
        "lm_head",
    ]
    assert peft_helper.context_length == 16384
    assert peft_helper.vllm_max_position_embeddings == 4096
    # The long-context scaling factor is the adapter's context length divided
    # by vLLM's max_position_embeddings, rounded up.
    assert peft_helper.vllm_long_context_scaling_factor == float(
        math.ceil(peft_helper.context_length /
                  peft_helper.vllm_max_position_embeddings))
    # RSLoRA: rewrite the adapter config with use_rslora=True and check that
    # the scaling factor becomes lora_alpha / sqrt(r) rather than the
    # standard lora_alpha / r.
    rslora_config = dict(use_rslora=True)
    test_dir = tmp_path / "test_rslora"
    shutil.copytree(long_context_lora_files_16k_1, test_dir)

    # Load and modify the adapter configuration
    config_path = test_dir / "adapter_config.json"
    with open(config_path) as f:
        adapter_config = json.load(f)
    adapter_config.update(rslora_config)

    # Save the modified configuration
    with open(config_path, "w") as f:
        json.dump(adapter_config, f)

    peft_helper = PEFTHelper.from_local_dir(test_dir,
                                            max_position_embeddings=4096)
    peft_helper.validate_legal(lora_config)
    scaling = peft_helper.lora_alpha / math.sqrt(peft_helper.r)
    assert abs(peft_helper.vllm_lora_scaling_factor - scaling) < 1e-3


@pytest.mark.parametrize("test_name,config_change,expected_error", ERROR_CASES)
def test_peft_helper_error(
    sql_lora_files,
    tmp_path,
    test_name: str,
    config_change: dict,
    expected_error: str,
):
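    """Each unsupported setting in adapter_config.json should cause
    validate_legal to raise a ValueError matching expected_error."""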
    test_dir = tmp_path / test_name
    shutil.copytree(sql_lora_files, test_dir)

    # Load and modify the adapter configuration
    config_path = test_dir / "adapter_config.json"
    with open(config_path) as f:
        adapter_config = json.load(f)
    adapter_config.update(config_change)

    # Save the modified configuration
    with open(config_path, "w") as f:
        json.dump(adapter_config, f)

    lora_config = LoRAConfig(max_lora_rank=16, max_cpu_loras=3, max_loras=2)
    # Loading and validating the modified adapter should raise
    with pytest.raises(ValueError, match=expected_error):
        PEFTHelper.from_local_dir(
            test_dir, max_position_embeddings=4096).validate_legal(lora_config)