"""
This file serves as a documentation example and CI test for basic LLM batch inference.
"""

# Dependency setup
import subprocess
import sys

subprocess.check_call([sys.executable, "-m", "pip", "install", "--upgrade", "ray[llm]"])
subprocess.check_call(
    [sys.executable, "-m", "pip", "install", "--upgrade", "transformers"]
)
subprocess.check_call([sys.executable, "-m", "pip", "install", "numpy==1.26.4"])


# __basic_llm_example_start__
import ray
from ray.data.llm import vLLMEngineProcessorConfig, build_llm_processor

# __basic_config_example_start__
# Basic vLLM configuration
config = vLLMEngineProcessorConfig(
    model_source="unsloth/Llama-3.1-8B-Instruct",
    engine_kwargs={
        "enable_chunked_prefill": True,
        "max_num_batched_tokens": 4096,  # Reduce if CUDA OOM occurs
        "max_model_len": 16384,
    },
    concurrency=1,
    batch_size=64,
)
# __basic_config_example_end__

processor = build_llm_processor(
    config,
    preprocess=lambda row: dict(
        messages=[
            {"role": "system", "content": "You are a bot that responds with haikus."},
            {"role": "user", "content": row["item"]},
        ],
        sampling_params=dict(
            temperature=0.3,
            max_tokens=250,
        ),
    ),
    postprocess=lambda row: dict(
        answer=row["generated_text"],
        **row,  # Keep all the original columns from the dataset.
    ),
)

ds = ray.data.from_items(["Start of the haiku is: Complete this for me..."])

if __name__ == "__main__":
    try:
        import torch

        if torch.cuda.is_available():
            ds = processor(ds)
            ds.show(limit=1)
        else:
            print("Skipping basic LLM run (no GPU available)")
    except Exception as e:
        print(f"Skipping basic LLM run due to environment error: {e}")

# __hf_token_config_example_start__
# Configuration with a Hugging Face token (for gated or private models)
config_with_token = vLLMEngineProcessorConfig(
    model_source="unsloth/Llama-3.1-8B-Instruct",
    runtime_env={"env_vars": {"HF_TOKEN": "your_huggingface_token"}},
    concurrency=1,
    batch_size=64,
)
# __hf_token_config_example_end__
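
# A safer variant (sketch): read the token from the driver's environment instead
# of hardcoding it. This assumes HF_TOKEN is already exported in your shell.
import os

config_with_env_token = vLLMEngineProcessorConfig(
    model_source="unsloth/Llama-3.1-8B-Instruct",
    runtime_env={"env_vars": {"HF_TOKEN": os.environ.get("HF_TOKEN", "")}},
    concurrency=1,
    batch_size=64,
)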

# __parallel_config_example_start__
# Model parallelism configuration for larger models
# tensor_parallel_size=2: Split the model across 2 GPUs for tensor parallelism
# pipeline_parallel_size=2: Use 2 pipeline stages (total 4 GPUs needed)
# Total GPUs required = tensor_parallel_size * pipeline_parallel_size = 4
config = vLLMEngineProcessorConfig(
    model_source="unsloth/Llama-3.1-8B-Instruct",
    engine_kwargs={
        "max_model_len": 16384,
        "tensor_parallel_size": 2,
        "pipeline_parallel_size": 2,
        "enable_chunked_prefill": True,
        "max_num_batched_tokens": 2048,
    },
    concurrency=1,
    batch_size=32,
    accelerator_type="L4",
)
# __parallel_config_example_end__
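
# Quick sanity check of the GPU math above (sketch): each engine replica needs
# tensor_parallel_size * pipeline_parallel_size GPUs, and `concurrency` controls
# how many replicas Ray Data launches.
gpus_per_replica = 2 * 2  # tensor_parallel_size * pipeline_parallel_size
total_gpus_required = gpus_per_replica * 1  # * concurrency
assert total_gpus_required == 4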

# __runai_config_example_start__
# RunAI streamer configuration for optimized model loading
# Note: Install vLLM with RunAI dependencies: pip install -U "vllm[runai]>=0.10.1"
config = vLLMEngineProcessorConfig(
    model_source="unsloth/Llama-3.1-8B-Instruct",
    engine_kwargs={
        "load_format": "runai_streamer",
        "max_model_len": 16384,
    },
    concurrency=1,
    batch_size=64,
)
# __runai_config_example_end__

# __lora_config_example_start__
# Multi-LoRA configuration
config = vLLMEngineProcessorConfig(
    model_source="unsloth/Llama-3.1-8B-Instruct",
    engine_kwargs={
        "enable_lora": True,
        "max_lora_rank": 32,
        "max_loras": 1,
        "max_model_len": 16384,
    },
    concurrency=1,
    batch_size=32,
)
# __lora_config_example_end__
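
# Usage sketch (assumption): with LoRA enabled, a row can select an adapter by
# returning a per-row `model` field from `preprocess`. The adapter ID below is a
# placeholder, not a real checkpoint.
lora_processor = build_llm_processor(
    config,
    preprocess=lambda row: dict(
        model="your-org/your-lora-adapter",  # hypothetical LoRA adapter ID
        messages=[
            {"role": "system", "content": "You are a bot that responds with haikus."},
            {"role": "user", "content": row["item"]},
        ],
        sampling_params=dict(temperature=0.3, max_tokens=250),
    ),
    postprocess=lambda row: dict(answer=row["generated_text"], **row),
)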

# __s3_config_example_start__
# S3-hosted model configuration
s3_config = vLLMEngineProcessorConfig(
    model_source="s3://your-bucket/your-model-path/",
    engine_kwargs={
        "load_format": "runai_streamer",
        "max_model_len": 16384,
    },
    concurrency=1,
    batch_size=64,
)
# __s3_config_example_end__
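
# Credentials sketch (assumption): if the bucket is private, forward AWS
# credentials to the workers through runtime_env, mirroring the HF_TOKEN
# pattern above. The values below are placeholders.
s3_private_config = vLLMEngineProcessorConfig(
    model_source="s3://your-bucket/your-model-path/",
    engine_kwargs={
        "load_format": "runai_streamer",
        "max_model_len": 16384,
    },
    runtime_env={
        "env_vars": {
            "AWS_ACCESS_KEY_ID": "your_access_key_id",
            "AWS_SECRET_ACCESS_KEY": "your_secret_access_key",
            "AWS_REGION": "your_region",
        }
    },
    concurrency=1,
    batch_size=64,
)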

# __gpu_memory_config_example_start__
# GPU memory management configuration
# If you encounter CUDA out-of-memory errors, try these optimizations:
config_memory_optimized = vLLMEngineProcessorConfig(
    model_source="unsloth/Llama-3.1-8B-Instruct",
    engine_kwargs={
        "max_model_len": 8192,
        "max_num_batched_tokens": 2048,
        "enable_chunked_prefill": True,
        "gpu_memory_utilization": 0.85,
        "block_size": 16,
    },
    concurrency=1,
    batch_size=16,
)

# For very large models or limited GPU memory:
config_minimal_memory = vLLMEngineProcessorConfig(
    model_source="unsloth/Llama-3.1-8B-Instruct",
    engine_kwargs={
        "max_model_len": 4096,
        "max_num_batched_tokens": 1024,
        "enable_chunked_prefill": True,
        "gpu_memory_utilization": 0.75,
    },
    concurrency=1,
    batch_size=8,
)
# __gpu_memory_config_example_end__
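
# Rough intuition for the settings above (illustrative numbers only):
# gpu_memory_utilization caps the fraction of GPU memory vLLM may claim for
# model weights and KV cache, so 0.85 on a 24 GB card gives vLLM roughly 20.4 GB.
example_gpu_memory_gb = 24
vllm_budget_gb = example_gpu_memory_gb * 0.85
assert round(vllm_budget_gb, 1) == 20.4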

# __embedding_config_example_start__
# Embedding model configuration
embedding_config = vLLMEngineProcessorConfig(
    model_source="sentence-transformers/all-MiniLM-L6-v2",
    task_type="embed",
    engine_kwargs=dict(
        enable_prefix_caching=False,
        enable_chunked_prefill=False,
        max_model_len=256,
        enforce_eager=True,
    ),
    batch_size=32,
    concurrency=1,
    apply_chat_template=False,
    detokenize=False,
)


# Example usage for embeddings
def create_embedding_processor():
    return build_llm_processor(
        embedding_config,
        preprocess=lambda row: dict(prompt=row["text"]),
        postprocess=lambda row: {
            "text": row["prompt"],
            "embedding": row["embeddings"],
        },
    )


# __embedding_config_example_end__

# __basic_llm_example_end__
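
# Example run for the embedding processor (sketch), guarded like the chat
# example above so CI machines without a GPU skip it cleanly.
if __name__ == "__main__":
    try:
        import torch

        if torch.cuda.is_available():
            embedding_processor = create_embedding_processor()
            texts = ray.data.from_items([{"text": "Ray Data runs batch inference."}])
            embedding_processor(texts).show(limit=1)
        else:
            print("Skipping embedding run (no GPU available)")
    except Exception as e:
        print(f"Skipping embedding run due to environment error: {e}")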