"""
This file serves as a documentation example and CI test for basic LLM batch inference.

Structure:
1. Infrastructure setup: imports and CI handling of GPU requirements
2. Docs example (between __basic_llm_example_start/end__): embedded in the Sphinx docs via literalinclude
3. Test validation and cleanup
"""

import ray
from ray.data.llm import vLLMEngineProcessorConfig, build_llm_processor

# Infrastructure: CI-only setup that strips GPU requirements so the example
# can be validated on CPU-only test machines.
_original_build_llm_processor = build_llm_processor

def _testing_build_llm_processor(config, **kwargs):
    """Remove accelerator requirements for testing."""
    if hasattr(config, "accelerator_type"):
        config.accelerator_type = None
    return _original_build_llm_processor(config, **kwargs)

# Apply the monkeypatch on the ray.data.llm module itself so that later
# `from ray.data.llm import build_llm_processor` statements (including the
# one inside the docs example below) also pick up the patched function;
# rebinding only the local name would be undone by those re-imports.
import ray.data.llm as _ray_data_llm

_ray_data_llm.build_llm_processor = _testing_build_llm_processor
build_llm_processor = _testing_build_llm_processor
# __basic_llm_example_start__
import ray.data
from ray.data.llm import vLLMEngineProcessorConfig, build_llm_processor

def create_basic_config():
    """Create basic vLLM configuration."""
    return vLLMEngineProcessorConfig(
        model_source="unsloth/Llama-3.1-8B-Instruct",
        engine_kwargs={"max_model_len": 20000},
        concurrency=1,
        batch_size=64,
    )
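
# Note: max_model_len caps prompt + generated tokens per request and bounds
# the KV cache vLLM allocates, so lower values reduce GPU memory pressure.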

def create_parallel_config():
    """Create model parallelism configuration."""
    return vLLMEngineProcessorConfig(
        model_source="unsloth/Llama-3.1-8B-Instruct",
        engine_kwargs={
            "max_model_len": 16384,
            "tensor_parallel_size": 4,
            "pipeline_parallel_size": 2,
            "enable_chunked_prefill": True,
            "max_num_batched_tokens": 2048,
        },
        concurrency=1,
        batch_size=64,
    )
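
# Note: each engine replica above shards the model across
# tensor_parallel_size * pipeline_parallel_size = 8 GPUs, so concurrency=1
# requires 8 GPUs in total.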

def create_runai_config():
    """Create RunAI streamer configuration."""
    return vLLMEngineProcessorConfig(
        model_source="unsloth/Llama-3.1-8B-Instruct",
        engine_kwargs={"load_format": "runai_streamer"},
        concurrency=1,
        batch_size=64,
    )
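
# Note: load_format="runai_streamer" streams safetensors weights with
# concurrent readers to shorten model cold starts; it assumes the RunAI
# Model Streamer dependency is installed alongside vLLM.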

def create_s3_config():
    """Create S3-hosted model configuration."""
    return vLLMEngineProcessorConfig(
        model_source="s3://your-bucket/your-model/",
        engine_kwargs={"load_format": "runai_streamer"},
        runtime_env={
            "env_vars": {
                "AWS_ACCESS_KEY_ID": "your_access_key_id",
                "AWS_SECRET_ACCESS_KEY": "your_secret_access_key",
                "AWS_REGION": "your_region",
            }
        },
        concurrency=1,
        batch_size=64,
    )
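
# The AWS values above are placeholders; in practice, inject real credentials
# through your cluster's secret management rather than hard-coding them.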

def create_lora_config():
    """Create multi-LoRA configuration."""
    return vLLMEngineProcessorConfig(
        model_source="unsloth/Llama-3.1-8B-Instruct",
        engine_kwargs={
            "enable_lora": True,
            "max_lora_rank": 32,
            "max_loras": 1,
        },
        concurrency=1,
        batch_size=64,
    )
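
# Hedged sketch (not exercised by the tests below): with enable_lora=True the
# usual Ray Data LLM pattern is to select an adapter per row from the
# preprocess output. The adapter path and the "text" field are illustrative
# assumptions, not values from this example.
def create_lora_processor_sketch():
    config = create_lora_config()
    return build_llm_processor(
        config,
        preprocess=lambda row: dict(
            # Setting "model" to a LoRA adapter (HF repo or local path)
            # applies it for this row; omit the field to use the base model.
            model="your-org/your-lora-adapter",
            messages=[{"role": "user", "content": row["text"]}],
            sampling_params=dict(temperature=0.3, max_tokens=150),
        ),
        postprocess=lambda row: dict(answer=row["generated_text"]),
    )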

def run_basic_example():
    """Build the basic LLM processor.

    Construction only: applying the processor to the dataset would launch
    vLLM engine workers and require GPUs.
    """
    config = create_basic_config()
    ds = ray.data.from_items([{"text": "Write a haiku about machine learning."}])
    processor = build_llm_processor(config)
    print("LLM processor configured successfully")
    return config, ds, processor

# __basic_llm_example_end__
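
# Hedged sketch (kept outside the docs snippet): how the processor is
# typically wired end to end with preprocess/postprocess callbacks. The row
# fields ("text", "generated_text") follow the common Ray Data LLM
# convention; adjust them to your schema. The inference call itself is left
# commented out so this module stays runnable without GPUs.
def run_full_pipeline_sketch():
    config = create_basic_config()
    processor = build_llm_processor(
        config,
        preprocess=lambda row: dict(
            messages=[
                {"role": "system", "content": "You are a helpful assistant."},
                {"role": "user", "content": row["text"]},
            ],
            sampling_params=dict(temperature=0.3, max_tokens=250),
        ),
        postprocess=lambda row: dict(answer=row["generated_text"]),
    )
    ds = ray.data.from_items([{"text": "Write a haiku about machine learning."}])
    # ds = processor(ds)  # launches vLLM engine replicas (requires GPUs)
    # ds.show(limit=1)
    return processor, ds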

# Test validation and cleanup
def run_test():
    """Validate that every configuration in the example is well formed."""
    import sys

    suppress_output = "pytest" in sys.modules

    try:
        # Test 1: Basic configuration
        basic_config = create_basic_config()
        assert basic_config.model_source == "unsloth/Llama-3.1-8B-Instruct"
        assert basic_config.batch_size == 64
        assert basic_config.engine_kwargs["max_model_len"] == 20000

        # Test 2: Model parallelism configuration
        parallel_config = create_parallel_config()
        assert parallel_config.engine_kwargs["tensor_parallel_size"] == 4
        assert parallel_config.engine_kwargs["pipeline_parallel_size"] == 2
        assert parallel_config.engine_kwargs["enable_chunked_prefill"] is True
        assert parallel_config.engine_kwargs["max_num_batched_tokens"] == 2048

        # Test 3: RunAI streamer configuration
        runai_config = create_runai_config()
        assert runai_config.engine_kwargs["load_format"] == "runai_streamer"
        assert runai_config.model_source == "unsloth/Llama-3.1-8B-Instruct"

        # Test 4: S3 configuration with environment variables
        s3_config = create_s3_config()
        assert s3_config.model_source == "s3://your-bucket/your-model/"
        assert s3_config.engine_kwargs["load_format"] == "runai_streamer"
        assert "AWS_ACCESS_KEY_ID" in s3_config.runtime_env["env_vars"]
        assert "AWS_SECRET_ACCESS_KEY" in s3_config.runtime_env["env_vars"]
        assert "AWS_REGION" in s3_config.runtime_env["env_vars"]

        # Test 5: Multi-LoRA configuration
        lora_config = create_lora_config()
        assert lora_config.engine_kwargs["enable_lora"] is True
        assert lora_config.engine_kwargs["max_lora_rank"] == 32
        assert lora_config.engine_kwargs["max_loras"] == 1

        # Test 6: Processor creation works (exercises the Ray integration;
        # this import resolves to the CI-patched function defined above)
        from ray.data.llm import build_llm_processor
        test_processor = build_llm_processor(basic_config)
        assert test_processor is not None

        if not suppress_output:
            print("Basic LLM example validation successful (all configs tested)")
        return True
    except Exception as e:
        if not suppress_output:
            print(f"Basic LLM example validation failed: {e}")
        return False

if __name__ == "__main__":
    import sys

    # Run the basic example, then the validation tests; exit non-zero on
    # failure so CI surfaces regressions.
    run_basic_example()
    sys.exit(0 if run_test() else 1)