From ad54e80ba8c3d2d598437f19f9f519220158a090 Mon Sep 17 00:00:00 2001
From: kee hyun an
Date: Thu, 31 Jul 2025 02:27:49 -0700
Subject: [PATCH 1/6] fp8/nvfp4 quantization support

---
 tools/llm/README.md         |  29 ++++
 tools/llm/quantize_utils.py | 267 ++++++++++++++++++++++++++++++++++++
 tools/llm/run_llm.py        |  36 ++++-
 3 files changed, 329 insertions(+), 3 deletions(-)
 create mode 100644 tools/llm/quantize_utils.py

diff --git a/tools/llm/README.md b/tools/llm/README.md
index 05a1e3cc60..d5ccb6f291 100644
--- a/tools/llm/README.md
+++ b/tools/llm/README.md
@@ -7,6 +7,7 @@ This directory provides utilities and scripts for compiling, optimizing, and ben
 - **Model Support:** Works with popular LLMs such as Llama-3, Qwen2.5, etc.
 - **VLM Support:** Supports Visual Language Models like Qwen2.5-VL and Eagle2.
 - **Precision Modes:** Supports FP16, BF16, and FP32.
+- **Quantization:** Supports FP8 and NVFP4 quantization formats for reduced memory usage and improved inference speed.
 - **KV Cache:** Supports static and dynamic KV cache for efficient autoregressive decoding.
 - **Benchmarking:** Measures and compares throughput and latency for PyTorch and TensorRT backends.
 - **Custom Attention:** Registers and converts custom scaled dot-product attention (SDPA) for compatibility with TensorRT.
@@ -54,11 +55,39 @@ python run_vlm.py --model nvidia/Eagle2-2B --precision FP16 --num_tokens 128 --c
 - `--prompt`: Input prompt for generation.
 - `--image_path`: (Optional) Path to input image file for VLM models. If not provided, will use a sample image.
 - `--precision`: Precision mode (`FP16`, `FP32`).
+- `--qformat`: Quantization format (`fp8`, `nvfp4`) to apply.
+- `--pre_quantized`: Flag to use pre-quantized models from HuggingFace.
 - `--num_tokens`: Number of output tokens to generate.
 - `--cache`: KV cache type (`static_v1`, `static_v2`, or empty for no KV caching).
 - `--benchmark`: Enable benchmarking mode.
 - `--enable_pytorch_run`: Also run and compare PyTorch baseline.
 
+### Quantization
+
+Torch-TensorRT supports quantization to reduce model memory footprint and improve inference performance:
+
+#### Using Pre-quantized Models
+
+To use pre-quantized models from HuggingFace:
+
+```bash
+python run_llm.py --model nvidia/Llama-3.1-8B-Instruct-FP8 --pre_quantized --prompt "What is parallel programming?" --precision FP16 --num_tokens 128
+```
+
+#### Applying quantization by ModelOpt
+
+Apply fp8 quantization from HuggingFace:
+
+```bash
+python run_llm.py --model meta-llama/Llama-3.1-8B --qformat fp8 --prompt "What is parallel programming?" --precision FP16 --num_tokens 128
+```
+
+#### Quantization Requirements
+
+- **ModelOpt Library**: Required for quantization operations
+- **FP8**: Supported on Hopper and Blackwell-generation GPUs.
+- **NVFP4**: Supported on Blackwell-generation GPUs.
+
 ### Caching Strategies
 
 - **Static Cache v1/v2:** Adds static KV cache tensors as model inputs/outputs for efficient reuse.
diff --git a/tools/llm/quantize_utils.py b/tools/llm/quantize_utils.py
new file mode 100644
index 0000000000..a33d2d993f
--- /dev/null
+++ b/tools/llm/quantize_utils.py
@@ -0,0 +1,267 @@
+import json
+import logging
+import os
+
+import huggingface_hub
+import torch
+from huggingface_hub import snapshot_download
+
+logger = logging.getLogger(__name__)
+
+try:
+    import modelopt.torch.quantization as mtq  # noqa: F401
+
+    assert torch.ops.tensorrt.quantize_op.default
+except Exception:
+    logger.warning("Unable to import quantization op. 
Please install modelopt library") + +from modelopt.core.torch.quantization.qtensor.nvfp4_tensor import NVFP4QTensor +from modelopt.torch.quantization.config import QuantizerAttributeConfig +from modelopt.torch.quantization.nn.modules.tensor_quantizer import TensorQuantizer +from modelopt.torch.utils.dataset_utils import ( + create_forward_loop, + get_dataset_dataloader, +) +from safetensors import safe_open + + +def quantize_model(model, args, tokenizer): + """ + Quantize a PyTorch model using ModelOpt quantization. + + This function performs post-training quantization (PTQ) on the model using + calibration data from the provided tokenizer. It supports both FP8 and NVFP4 + quantization formats. + + Args: + model: PyTorch model to quantize + args: Arguments containing quantization format and debug settings + tokenizer: Tokenizer for creating calibration dataloader + + Returns: + Quantized model with reduced precision weights and activations + + Raises: + RuntimeError: If unsupported quantization format is specified + """ + # Create calibration dataloader for quantization + calib_dataloader = get_dataset_dataloader( + tokenizer=tokenizer, + batch_size=32, + num_samples=512, + device="cuda:0", + ) + if args.qformat == "fp8": + quant_cfg = mtq.FP8_DEFAULT_CFG + elif args.qformat == "nvfp4": + quant_cfg = mtq.NVFP4_DEFAULT_CFG + else: + raise RuntimeError("Unsupported quantization format") + calibrate_loop = create_forward_loop(dataloader=calib_dataloader) + + model = mtq.quantize(model, quant_cfg, forward_loop=calibrate_loop) + if args.debug: + mtq.print_quant_summary(model) + + return model + + +class TensorRTQuantizedLinear(torch.nn.Module): + """ + TensorRT quantized linear layer that applies quantization to both input and weight tensors. + """ + + def __init__( + self, original_linear: torch.nn.Linear, input_amax, weight_amax, quant_cfg + ): + """ + Initialize quantized linear layer. + + Args: + original_linear: Original PyTorch linear layer to quantize + input_amax: Maximum absolute value for input quantization scaling + weight_amax: Maximum absolute value for weight quantization scaling + quant_cfg: Quantization configuration for TensorQuantizer + """ + super().__init__() + + # Store reference to original linear layer for weight access + self.original_linear = original_linear + + # Copy bias from original layer if it exists + if original_linear.bias is not None: + self.bias = torch.nn.Parameter(original_linear.bias.clone()).cuda() + else: + self.bias = None + + # Create quantizers for input and weight tensors + self.input_quantizer = TensorQuantizer( + quant_attribute_cfg=quant_cfg, amax=input_amax + ) + self.weight_quantizer = TensorQuantizer( + quant_attribute_cfg=quant_cfg, amax=weight_amax + ) + + def forward(self, input): + input = self.input_quantizer(input) + weight = self.weight_quantizer(self.original_linear.weight) + return torch.nn.functional.linear(input, weight, self.bias) + + +def convert_linear_to_tensorrt_quantized(model, model_name): + """ + Convert linear layers in a model to TensorRT quantized versions from pre-quantized weights. + + This function is specifically designed for Hugging Face quantized models and only + applies quantization to linear operations. It loads pre-quantized models from + Hugging Face format and replaces standard linear layers with TensorRTQuantizedLinear + layers. It supports both FP8 and NVFP4 quantization formats. + + The function: + 1. Loads quantization scales from Hugging Face model files (SafeTensors) + 2. 
Parses quantization configuration from hf_quant_config.json + 3. Replaces standard linear layers with TensorRTQuantizedLinear layers + 4. Applies appropriate quantization based on the model's quantization format + + Note: This function only quantizes linear operations and is intended for use + with pre-quantized Hugging Face models that have been quantized using ModelOpt. + + Args: + model: PyTorch model to quantize + model_name: Path to Hugging Face model directory or model identifier + + Returns: + Model with quantized linear layers + + Raises: + RuntimeError: If quantization config is not found or unsupported format + """ + # Determine if model_name is a local directory or needs to be downloaded + if os.path.isdir(model_name): + hf_folder = model_name + else: + # Download model from Hugging Face Hub + hf_folder = snapshot_download( + model_name, + local_files_only=huggingface_hub.constants.HF_HUB_OFFLINE, + ignore_patterns=["original/**/*"], + revision=None, + ) + + # Load all tensors from SafeTensors files + tensors = {} + for file in os.listdir(hf_folder): + if file.endswith(".safetensors"): + with safe_open( + os.path.join(hf_folder, file), framework="pt", device="cpu" + ) as f: + tensor_names = f.keys() + for name in tensor_names: + tensors[name] = f.get_tensor(name) + + # Load and parse quantization configuration + hf_quant_config_path = f"{hf_folder}/hf_quant_config.json" + if os.path.exists(hf_quant_config_path): + with open(hf_quant_config_path, "r") as f: + hf_quant_config = json.load(f) + hf_quant_config = hf_quant_config["quantization"] + + hf_quant_algo = hf_quant_config.pop("quant_algo", None) + if hf_quant_algo != "FP8" and hf_quant_algo != "NVFP4": + raise RuntimeError("Only FP8 or NVFP4 quantization is supported") + else: + raise RuntimeError("No quantization config found") + + # Iterate through all modules in the model + for name, module in model.named_modules(): + # Check if the module is a linear layer + target = torch.nn.modules.linear.Linear + if isinstance(module, target): + # Construct names for quantization scale tensors + # These follow the naming convention: module_name.weight_scale and module_name.input_scale + weight_scale_name = name + ".weight_scale" + input_scale_name = name + ".input_scale" + + if weight_scale_name not in tensors: + logger.warning(f"Weight scale tensor {weight_scale_name} not found") + continue + if input_scale_name not in tensors: + logger.warning(f"Input scale tensor {input_scale_name} not found") + continue + + if hf_quant_algo == "FP8": + # FP8 E4M3 format has a maximum representable value of 448.0 + # Scale the quantization parameters accordingly + weight_scale = tensors.pop(weight_scale_name) + weight_amax = weight_scale * 448.0 + input_amax = tensors.pop(input_scale_name) * 448.0 + + # Dequantize the weight using the scale factor + dequantized_weight_data = module.weight.to(torch.float32) * weight_scale + + # Configure quantizer for FP8 format (4 exponent bits, 3 mantissa bits) + quantizer_attribute_config = QuantizerAttributeConfig( + num_bits=(4, 3), axis=None + ) + + elif hf_quant_algo == "NVFP4": + # NVFP4 format requires additional scale tensor and different configuration + weight_name = name + ".weight" + weight_scale2_name = name + ".weight_scale_2" + weight_scale = tensors.pop(weight_scale_name) + input_scale = tensors.pop(input_scale_name) + weight_scale2 = tensors.pop(weight_scale2_name) + + # Calculate amax values with additional scaling factor for NVFP4 + input_amax = input_scale * 448.0 * 6.0 + weight_amax = 
weight_scale2 * 448.0 * 6.0 + + # Handle NVFP4 tensor format + weight_data = tensors.pop(weight_name) + original_shape = list(weight_data.shape) + original_shape[-1] *= 2 # NVFP4 packs 2 values per element + nvfp4_tensor = NVFP4QTensor( + torch.Size(original_shape), torch.float32, weight_data + ) + + # Dequantize using both scales and block size configuration + dequantized_weight_data = nvfp4_tensor.dequantize( + scale=weight_scale, double_scale=weight_scale2, block_sizes={-1: 16} + ) + + # Configure quantizer for NVFP4 format with dynamic block quantization + quantizer_attribute_config = QuantizerAttributeConfig( + num_bits=(2, 1), + axis=None, + block_sizes={-1: 16, "type": "dynamic", "scale_bits": (4, 3)}, + enable=True, + ) + + # Restore the weight to its original full-precision format so that QDQ nodes + # can be properly inserted and optimized during TensorRT compilation + module.weight.data = dequantized_weight_data + + # Create the quantized linear layer with calculated amax values + quantized_module = TensorRTQuantizedLinear( + module, input_amax, weight_amax, quantizer_attribute_config + ) + + # Replace the original module with the quantized version + # Extract parent module name and child module name + parent_name = ".".join(name.split(".")[:-1]) + child_name = name.split(".")[-1] + + if parent_name: + # Get the parent module and replace the child + parent_module = model.get_submodule(parent_name) + setattr(parent_module, child_name, quantized_module) + else: + # If no parent, replace at model level + setattr(model, child_name, quantized_module) + + # Log any unused tensors for debugging + if len(tensors) > 0: + logger.debug(f"{len(tensors)} tensors not used") + for key in tensors: + logger.debug(f" {key}") + return model diff --git a/tools/llm/run_llm.py b/tools/llm/run_llm.py index 1531c30622..31b1e36e96 100644 --- a/tools/llm/run_llm.py +++ b/tools/llm/run_llm.py @@ -9,6 +9,7 @@ import argparse import copy +import json import os import timeit from contextlib import nullcontext @@ -54,12 +55,15 @@ def get_model(args): args.model, use_cache=False, attn_implementation="sdpa", + ignore_mismatched_sizes=True, ) .eval() .cuda() ) # register SDPA variant for the model register_sdpa.enable_sdpa_converter(args.model, model.config) + if args.pre_quantized: + model = convert_linear_to_tensorrt_quantized(model, args.model).cuda() if args.precision == "FP16": model = model.to(torch.float16) @@ -93,7 +97,8 @@ def compile_torchtrt(model, input_ids, args): for optimized inference """ max_seq_len = input_ids.shape[1] + args.num_tokens - ep = export_llm(model, input_ids, max_seq_len=max_seq_len) + with export_torch_mode() if args.qformat or args.pre_quantized else nullcontext(): + ep = export_llm(model, input_ids, max_seq_len=max_seq_len) position_ids = torch.arange(input_ids.shape[1]).unsqueeze(0).to(DEVICE) # Set precision specific flags use_fp32_acc = False @@ -236,13 +241,36 @@ def measure_perf(trt_model, input_signature, backend_name): arg_parser.add_argument( "--benchmark", action="store_true", help="Enable benchmark (default: False)" ) - + arg_parser.add_argument( + "--qformat", + help=("Apply quantization format. 
Options: fp8, nvfp4 (default: None)"), + default=None, + ) + arg_parser.add_argument( + "--pre_quantized", + action="store_true", + help="Use pre-quantized hf model weights (default: False)", + ) args = arg_parser.parse_args() + + if args.qformat and args.pre_quantized: + print("Error: --qformat and --pre_quantized cannot be used together") + exit() + + if args.qformat or args.pre_quantized: + from modelopt.torch.quantization.utils import export_torch_mode + from quantize_utils import ( + convert_linear_to_tensorrt_quantized, + quantize_model, + ) + with torch.inference_mode(): model = get_model(args) tokenizer = AutoTokenizer.from_pretrained(args.tokenizer or args.model) - + # Set pad token + if tokenizer.pad_token is None: + tokenizer.pad_token = tokenizer.eos_token # Prepare input for benchmarking or evaluation if args.benchmark: input_ids = torch.randint( @@ -260,6 +288,8 @@ def measure_perf(trt_model, input_signature, backend_name): pyt_timings = None pyt_stats = None + if args.qformat != None: + model = quantize_model(model, args, tokenizer) if args.enable_pytorch_run: pyt_gen_tokens = generate( model, input_ids.clone(), MAX_OUTPUT_SEQ_LENGTH, tokenizer.eos_token_id From 57e37caacf26e1e17e935397c8d88a22baa07b1b Mon Sep 17 00:00:00 2001 From: kee hyun an Date: Tue, 2 Sep 2025 12:58:19 +0000 Subject: [PATCH 2/6] chore: Detect pre-quantized hf model --- tools/llm/README.md | 7 ++- tools/llm/quantize_utils.py | 98 ++++++++++++++++++++----------------- tools/llm/run_llm.py | 35 ++++++------- 3 files changed, 71 insertions(+), 69 deletions(-) diff --git a/tools/llm/README.md b/tools/llm/README.md index d5ccb6f291..d9d59086df 100644 --- a/tools/llm/README.md +++ b/tools/llm/README.md @@ -55,8 +55,7 @@ python run_vlm.py --model nvidia/Eagle2-2B --precision FP16 --num_tokens 128 --c - `--prompt`: Input prompt for generation. - `--image_path`: (Optional) Path to input image file for VLM models. If not provided, will use a sample image. - `--precision`: Precision mode (`FP16`, `FP32`). -- `--qformat`: Quantization format (`fp8`, `nvfp4`) to apply. -- `--pre_quantized`: Flag to use pre-quantized models from HuggingFace. +- `--quant_format`: Quantization format (`fp8`, `nvfp4`) to apply. - `--num_tokens`: Number of output tokens to generate. - `--cache`: KV cache type (`static_v1`, `static_v2`, or empty for no KV caching). - `--benchmark`: Enable benchmarking mode. @@ -71,7 +70,7 @@ Torch-TensorRT supports quantization to reduce model memory footprint and improv To use pre-quantized models from HuggingFace: ```bash -python run_llm.py --model nvidia/Llama-3.1-8B-Instruct-FP8 --pre_quantized --prompt "What is parallel programming?" --precision FP16 --num_tokens 128 +python run_llm.py --model nvidia/Llama-3.1-8B-Instruct-FP8 --prompt "What is parallel programming?" --precision FP16 --num_tokens 128 ``` #### Applying quantization by ModelOpt @@ -79,7 +78,7 @@ python run_llm.py --model nvidia/Llama-3.1-8B-Instruct-FP8 --pre_quantized --pro Apply fp8 quantization from HuggingFace: ```bash -python run_llm.py --model meta-llama/Llama-3.1-8B --qformat fp8 --prompt "What is parallel programming?" --precision FP16 --num_tokens 128 +python run_llm.py --model meta-llama/Llama-3.1-8B --quant_format fp8 --prompt "What is parallel programming?" 
--precision FP16 --num_tokens 128 ``` #### Quantization Requirements diff --git a/tools/llm/quantize_utils.py b/tools/llm/quantize_utils.py index a33d2d993f..8dab897a88 100644 --- a/tools/llm/quantize_utils.py +++ b/tools/llm/quantize_utils.py @@ -27,22 +27,22 @@ def quantize_model(model, args, tokenizer): """ - Quantize a PyTorch model using ModelOpt quantization. + Quantize a PyTorch model using ModelOpt post-training quantization (PTQ). - This function performs post-training quantization (PTQ) on the model using - calibration data from the provided tokenizer. It supports both FP8 and NVFP4 - quantization formats. + This function applies quantization to reduce model precision for faster inference + while maintaining acceptable accuracy. It uses calibration data generated from + the provided tokenizer to determine optimal quantization parameters. + Supported quantization formats: + - fp8: 8-bit floating point quantization + - nvfp4: 4-bit NVIDIA floating point quantization Args: - model: PyTorch model to quantize - args: Arguments containing quantization format and debug settings - tokenizer: Tokenizer for creating calibration dataloader + model: PyTorch model to quantize. Must be in evaluation mode. + args: Command line arguments containing quant_format and debug + tokenizer: Hugging Face tokenizer for creating calibration data Returns: - Quantized model with reduced precision weights and activations - - Raises: - RuntimeError: If unsupported quantization format is specified + Quantized model """ # Create calibration dataloader for quantization calib_dataloader = get_dataset_dataloader( @@ -51,9 +51,9 @@ def quantize_model(model, args, tokenizer): num_samples=512, device="cuda:0", ) - if args.qformat == "fp8": + if args.quant_format == "fp8": quant_cfg = mtq.FP8_DEFAULT_CFG - elif args.qformat == "nvfp4": + elif args.quant_format == "nvfp4": quant_cfg = mtq.NVFP4_DEFAULT_CFG else: raise RuntimeError("Unsupported quantization format") @@ -108,7 +108,38 @@ def forward(self, input): return torch.nn.functional.linear(input, weight, self.bias) -def convert_linear_to_tensorrt_quantized(model, model_name): +def load_quantization_config(model_name): + """ + Load quantization configuration from a Hugging Face model. + Args: + model_name (str): Local directory path or model identifier + Returns: + dict or None: Quantization configuration. None if no config found. + """ + # Determine if model_name is a local directory or needs to be downloaded + if os.path.isdir(model_name): + model_path = model_name + else: + # Download model from Hugging Face Hub + model_path = snapshot_download( + model_name, + local_files_only=huggingface_hub.constants.HF_HUB_OFFLINE, + ignore_patterns=["original/**/*"], + revision=None, + ) + hf_quant_config = None + # Load and parse quantization configuration + hf_quant_config_path = f"{model_path}/hf_quant_config.json" + if os.path.exists(hf_quant_config_path): + with open(hf_quant_config_path, "r") as f: + hf_quant_config = json.load(f) + hf_quant_config = hf_quant_config["quantization"] + hf_quant_config["model_path"] = model_path + + return hf_quant_config + + +def convert_linear_to_tensorrt_quantized(model, hf_quant_config): """ Convert linear layers in a model to TensorRT quantized versions from pre-quantized weights. @@ -119,16 +150,15 @@ def convert_linear_to_tensorrt_quantized(model, model_name): The function: 1. Loads quantization scales from Hugging Face model files (SafeTensors) - 2. Parses quantization configuration from hf_quant_config.json - 3. 
Replaces standard linear layers with TensorRTQuantizedLinear layers - 4. Applies appropriate quantization based on the model's quantization format + 2. Replaces standard linear layers with TensorRTQuantizedLinear layers + 3. Applies appropriate quantization based on the model's quantization format Note: This function only quantizes linear operations and is intended for use with pre-quantized Hugging Face models that have been quantized using ModelOpt. Args: model: PyTorch model to quantize - model_name: Path to Hugging Face model directory or model identifier + hf_quant_config: Quantization configuration Returns: Model with quantized linear layers @@ -136,41 +166,21 @@ def convert_linear_to_tensorrt_quantized(model, model_name): Raises: RuntimeError: If quantization config is not found or unsupported format """ - # Determine if model_name is a local directory or needs to be downloaded - if os.path.isdir(model_name): - hf_folder = model_name - else: - # Download model from Hugging Face Hub - hf_folder = snapshot_download( - model_name, - local_files_only=huggingface_hub.constants.HF_HUB_OFFLINE, - ignore_patterns=["original/**/*"], - revision=None, - ) - + model_path = hf_quant_config["model_path"] # Load all tensors from SafeTensors files tensors = {} - for file in os.listdir(hf_folder): + for file in os.listdir(model_path): if file.endswith(".safetensors"): with safe_open( - os.path.join(hf_folder, file), framework="pt", device="cpu" + os.path.join(model_path, file), framework="pt", device="cpu" ) as f: tensor_names = f.keys() for name in tensor_names: tensors[name] = f.get_tensor(name) - # Load and parse quantization configuration - hf_quant_config_path = f"{hf_folder}/hf_quant_config.json" - if os.path.exists(hf_quant_config_path): - with open(hf_quant_config_path, "r") as f: - hf_quant_config = json.load(f) - hf_quant_config = hf_quant_config["quantization"] - - hf_quant_algo = hf_quant_config.pop("quant_algo", None) - if hf_quant_algo != "FP8" and hf_quant_algo != "NVFP4": - raise RuntimeError("Only FP8 or NVFP4 quantization is supported") - else: - raise RuntimeError("No quantization config found") + hf_quant_algo = hf_quant_config.get("quant_algo", None) + if hf_quant_algo != "FP8" and hf_quant_algo != "NVFP4": + raise RuntimeError("Only FP8 or NVFP4 quantization is supported") # Iterate through all modules in the model for name, module in model.named_modules(): diff --git a/tools/llm/run_llm.py b/tools/llm/run_llm.py index 31b1e36e96..feced87ad0 100644 --- a/tools/llm/run_llm.py +++ b/tools/llm/run_llm.py @@ -19,6 +19,12 @@ # ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ import torch import torch_tensorrt +from modelopt.torch.quantization.utils import export_torch_mode +from quantize_utils import ( + convert_linear_to_tensorrt_quantized, + load_quantization_config, + quantize_model, +) from torchtrt_ext import register_sdpa from transformers import AutoModelForCausalLM, AutoTokenizer from utils import ( @@ -62,8 +68,11 @@ def get_model(args): ) # register SDPA variant for the model register_sdpa.enable_sdpa_converter(args.model, model.config) - if args.pre_quantized: - model = convert_linear_to_tensorrt_quantized(model, args.model).cuda() + + hf_quant_config = load_quantization_config(args.model) + if hf_quant_config: + model = convert_linear_to_tensorrt_quantized(model, hf_quant_config).cuda() + print(f"Model converted to TensorRT quantized") if args.precision == "FP16": model = model.to(torch.float16) @@ -97,7 +106,7 @@ def compile_torchtrt(model, input_ids, args): for optimized inference """ 
max_seq_len = input_ids.shape[1] + args.num_tokens - with export_torch_mode() if args.qformat or args.pre_quantized else nullcontext(): + with export_torch_mode(): ep = export_llm(model, input_ids, max_seq_len=max_seq_len) position_ids = torch.arange(input_ids.shape[1]).unsqueeze(0).to(DEVICE) # Set precision specific flags @@ -242,28 +251,12 @@ def measure_perf(trt_model, input_signature, backend_name): "--benchmark", action="store_true", help="Enable benchmark (default: False)" ) arg_parser.add_argument( - "--qformat", + "--quant_format", help=("Apply quantization format. Options: fp8, nvfp4 (default: None)"), default=None, ) - arg_parser.add_argument( - "--pre_quantized", - action="store_true", - help="Use pre-quantized hf model weights (default: False)", - ) args = arg_parser.parse_args() - if args.qformat and args.pre_quantized: - print("Error: --qformat and --pre_quantized cannot be used together") - exit() - - if args.qformat or args.pre_quantized: - from modelopt.torch.quantization.utils import export_torch_mode - from quantize_utils import ( - convert_linear_to_tensorrt_quantized, - quantize_model, - ) - with torch.inference_mode(): model = get_model(args) @@ -288,7 +281,7 @@ def measure_perf(trt_model, input_signature, backend_name): pyt_timings = None pyt_stats = None - if args.qformat != None: + if args.quant_format != None: model = quantize_model(model, args, tokenizer) if args.enable_pytorch_run: pyt_gen_tokens = generate( From 6b3f7f9e269c910525c40df2ffd38d1147ae59b3 Mon Sep 17 00:00:00 2001 From: kee hyun an Date: Thu, 4 Sep 2025 13:56:23 +0000 Subject: [PATCH 3/6] feat: Expose quantization API in torch_tensorrt.dynamo --- py/torch_tensorrt/dynamo/__init__.py | 1 + py/torch_tensorrt/dynamo/_quantization.py | 35 +++++++++++++++++++++++ tools/llm/quantize_utils.py | 15 ++++------ 3 files changed, 41 insertions(+), 10 deletions(-) create mode 100644 py/torch_tensorrt/dynamo/_quantization.py diff --git a/py/torch_tensorrt/dynamo/__init__.py b/py/torch_tensorrt/dynamo/__init__.py index 607dca76bf..4ecd97d09b 100644 --- a/py/torch_tensorrt/dynamo/__init__.py +++ b/py/torch_tensorrt/dynamo/__init__.py @@ -15,6 +15,7 @@ save_cross_compiled_exported_program, ) from ._exporter import export + from ._quantization import quantize from ._refit import refit_module_weights from ._settings import CompilationSettings from ._SourceIR import SourceIR diff --git a/py/torch_tensorrt/dynamo/_quantization.py b/py/torch_tensorrt/dynamo/_quantization.py new file mode 100644 index 0000000000..8f01dc04bf --- /dev/null +++ b/py/torch_tensorrt/dynamo/_quantization.py @@ -0,0 +1,35 @@ +import logging +from typing import Any, Callable + +import torch + +logger = logging.getLogger(__name__) + + +def quantize( + model: torch.nn.Module, + quant_format: str, + calibrate_loop: Callable[[], Any], + debug: bool = False, +) -> torch.nn.Module: + try: + import modelopt.torch.quantization as mtq + + assert torch.ops.tensorrt.quantize_op.default + except Exception: + logger.warning( + "Unable to import quantization op. 
Please install modelopt library" + ) + + if quant_format == "fp8": + quant_cfg = mtq.FP8_DEFAULT_CFG + elif quant_format == "nvfp4": + quant_cfg = mtq.NVFP4_DEFAULT_CFG + else: + raise RuntimeError("Unsupported quantization format") + + quantized_model = mtq.quantize(model, quant_cfg, forward_loop=calibrate_loop) + if debug: + mtq.print_quant_summary(quantized_model) + + return quantized_model diff --git a/tools/llm/quantize_utils.py b/tools/llm/quantize_utils.py index 8dab897a88..12f20aed43 100644 --- a/tools/llm/quantize_utils.py +++ b/tools/llm/quantize_utils.py @@ -4,6 +4,7 @@ import huggingface_hub import torch +import torch_tensorrt from huggingface_hub import snapshot_download logger = logging.getLogger(__name__) @@ -51,17 +52,11 @@ def quantize_model(model, args, tokenizer): num_samples=512, device="cuda:0", ) - if args.quant_format == "fp8": - quant_cfg = mtq.FP8_DEFAULT_CFG - elif args.quant_format == "nvfp4": - quant_cfg = mtq.NVFP4_DEFAULT_CFG - else: - raise RuntimeError("Unsupported quantization format") - calibrate_loop = create_forward_loop(dataloader=calib_dataloader) - model = mtq.quantize(model, quant_cfg, forward_loop=calibrate_loop) - if args.debug: - mtq.print_quant_summary(model) + calibrate_loop = create_forward_loop(dataloader=calib_dataloader) + model = torch_tensorrt.dynamo.quantize( + model, args.quant_format, calibrate_loop, debug=args.debug + ) return model From 1dd7761d14b97179737f335e2bc53830ea13991c Mon Sep 17 00:00:00 2001 From: kee hyun an Date: Sat, 20 Sep 2025 03:07:30 +0000 Subject: [PATCH 4/6] chore: address reviews --- py/torch_tensorrt/dynamo/__init__.py | 1 - py/torch_tensorrt/dynamo/_quantization.py | 35 ---------------- tools/llm/README.md | 21 +++++++--- tools/llm/quantize_utils.py | 50 ++++++++++++++--------- tools/llm/run_llm.py | 26 ++++++++---- tools/llm/utils.py | 2 +- 6 files changed, 63 insertions(+), 72 deletions(-) delete mode 100644 py/torch_tensorrt/dynamo/_quantization.py diff --git a/py/torch_tensorrt/dynamo/__init__.py b/py/torch_tensorrt/dynamo/__init__.py index 4ecd97d09b..607dca76bf 100644 --- a/py/torch_tensorrt/dynamo/__init__.py +++ b/py/torch_tensorrt/dynamo/__init__.py @@ -15,7 +15,6 @@ save_cross_compiled_exported_program, ) from ._exporter import export - from ._quantization import quantize from ._refit import refit_module_weights from ._settings import CompilationSettings from ._SourceIR import SourceIR diff --git a/py/torch_tensorrt/dynamo/_quantization.py b/py/torch_tensorrt/dynamo/_quantization.py deleted file mode 100644 index 8f01dc04bf..0000000000 --- a/py/torch_tensorrt/dynamo/_quantization.py +++ /dev/null @@ -1,35 +0,0 @@ -import logging -from typing import Any, Callable - -import torch - -logger = logging.getLogger(__name__) - - -def quantize( - model: torch.nn.Module, - quant_format: str, - calibrate_loop: Callable[[], Any], - debug: bool = False, -) -> torch.nn.Module: - try: - import modelopt.torch.quantization as mtq - - assert torch.ops.tensorrt.quantize_op.default - except Exception: - logger.warning( - "Unable to import quantization op. 
Please install modelopt library" - ) - - if quant_format == "fp8": - quant_cfg = mtq.FP8_DEFAULT_CFG - elif quant_format == "nvfp4": - quant_cfg = mtq.NVFP4_DEFAULT_CFG - else: - raise RuntimeError("Unsupported quantization format") - - quantized_model = mtq.quantize(model, quant_cfg, forward_loop=calibrate_loop) - if debug: - mtq.print_quant_summary(quantized_model) - - return quantized_model diff --git a/tools/llm/README.md b/tools/llm/README.md index d9d59086df..6e29356dfd 100644 --- a/tools/llm/README.md +++ b/tools/llm/README.md @@ -39,7 +39,7 @@ We have officially verified support for the following models: #### Text-only LLMs: `run_llm.py` ```bash -python run_llm.py --model meta-llama/Llama-3.2-1B-Instruct --prompt "What is parallel programming?" --precision FP16 --num_tokens 128 --cache static_v2 --benchmark +python run_llm.py --model meta-llama/Llama-3.2-1B-Instruct --prompt "What is parallel programming?" --model_precision FP16 --num_tokens 128 --cache static_v2 --benchmark ``` #### Vision Language Models: `run_vlm.py` @@ -54,8 +54,8 @@ python run_vlm.py --model nvidia/Eagle2-2B --precision FP16 --num_tokens 128 --c - `--tokenizer`: (Optional) Tokenizer name; defaults to model. - `--prompt`: Input prompt for generation. - `--image_path`: (Optional) Path to input image file for VLM models. If not provided, will use a sample image. -- `--precision`: Precision mode (`FP16`, `FP32`). -- `--quant_format`: Quantization format (`fp8`, `nvfp4`) to apply. +- `--model_precision`: Precision of model weight/buffer (`FP16`, `BF16`, `FP32`). +- `--quant_format`: (Optional) Quantization format (`fp8`, `nvfp4`) to apply. - `--num_tokens`: Number of output tokens to generate. - `--cache`: KV cache type (`static_v1`, `static_v2`, or empty for no KV caching). - `--benchmark`: Enable benchmarking mode. @@ -68,17 +68,26 @@ Torch-TensorRT supports quantization to reduce model memory footprint and improv #### Using Pre-quantized Models To use pre-quantized models from HuggingFace: +If a model contains quantization configuration (detected automatically), the model's linear layers are converted to TensorRT quantized versions using the specified quantization algorithm (e.g., FP8, NVFP4). The quantization algorithm type is displayed during conversion. + +**Note:** The `--quant_format` option will raise an error if it's used with pre-quantized models, as quantization cannot be applied to models that are already quantized. ```bash -python run_llm.py --model nvidia/Llama-3.1-8B-Instruct-FP8 --prompt "What is parallel programming?" --precision FP16 --num_tokens 128 +python run_llm.py --model nvidia/Llama-3.1-8B-Instruct-FP8 --prompt "What is parallel programming?" --model_precision FP16 --num_tokens 128 +``` + +**Expected output:** +``` +Model is FP8 pre-quantized hf model. Quantized linear layers are applied ``` #### Applying quantization by ModelOpt -Apply fp8 quantization from HuggingFace: +To apply quantization to non-quantized models using ModelOpt: +The `--quant_format` option calls `mtq.quantize()` to apply ModelOpt post-training quantization to the model. ```bash -python run_llm.py --model meta-llama/Llama-3.1-8B --quant_format fp8 --prompt "What is parallel programming?" --precision FP16 --num_tokens 128 +python run_llm.py --model meta-llama/Llama-3.1-8B --quant_format fp8 --prompt "What is parallel programming?" 
--model_precision FP16 --num_tokens 128 ``` #### Quantization Requirements diff --git a/tools/llm/quantize_utils.py b/tools/llm/quantize_utils.py index 12f20aed43..8c6076447a 100644 --- a/tools/llm/quantize_utils.py +++ b/tools/llm/quantize_utils.py @@ -4,7 +4,6 @@ import huggingface_hub import torch -import torch_tensorrt from huggingface_hub import snapshot_download logger = logging.getLogger(__name__) @@ -25,6 +24,11 @@ ) from safetensors import safe_open +# FP8 E4M3 format has a maximum representable value of 448.0 +MAX_BOUND_FP8 = 448.0 +# Additional scaling factor for NVFP4 +MAX_BOUND_NVFP4 = 6.0 + def quantize_model(model, args, tokenizer): """ @@ -52,11 +56,17 @@ def quantize_model(model, args, tokenizer): num_samples=512, device="cuda:0", ) - + if args.quant_format == "fp8": + quant_cfg = mtq.FP8_DEFAULT_CFG + elif args.quant_format == "nvfp4": + quant_cfg = mtq.NVFP4_DEFAULT_CFG + else: + raise RuntimeError("Unsupported quantization format") calibrate_loop = create_forward_loop(dataloader=calib_dataloader) - model = torch_tensorrt.dynamo.quantize( - model, args.quant_format, calibrate_loop, debug=args.debug - ) + + model = mtq.quantize(model, quant_cfg, forward_loop=calibrate_loop) + if args.debug: + mtq.print_quant_summary(model) return model @@ -83,12 +93,6 @@ def __init__( # Store reference to original linear layer for weight access self.original_linear = original_linear - # Copy bias from original layer if it exists - if original_linear.bias is not None: - self.bias = torch.nn.Parameter(original_linear.bias.clone()).cuda() - else: - self.bias = None - # Create quantizers for input and weight tensors self.input_quantizer = TensorQuantizer( quant_attribute_cfg=quant_cfg, amax=input_amax @@ -100,7 +104,7 @@ def __init__( def forward(self, input): input = self.input_quantizer(input) weight = self.weight_quantizer(self.original_linear.weight) - return torch.nn.functional.linear(input, weight, self.bias) + return torch.nn.functional.linear(input, weight, self.original_linear.bias) def load_quantization_config(model_name): @@ -134,7 +138,7 @@ def load_quantization_config(model_name): return hf_quant_config -def convert_linear_to_tensorrt_quantized(model, hf_quant_config): +def convert_linear_to_tensorrt_quantized(model, model_precision, hf_quant_config): """ Convert linear layers in a model to TensorRT quantized versions from pre-quantized weights. 
@@ -177,6 +181,13 @@ def convert_linear_to_tensorrt_quantized(model, hf_quant_config): if hf_quant_algo != "FP8" and hf_quant_algo != "NVFP4": raise RuntimeError("Only FP8 or NVFP4 quantization is supported") + if model_precision == "FP16": + weight_dtype = torch.float16 + elif model_precision == "BF16": + weight_dtype = torch.bfloat16 + else: + weight_dtype = torch.float32 + # Iterate through all modules in the model for name, module in model.named_modules(): # Check if the module is a linear layer @@ -195,14 +206,13 @@ def convert_linear_to_tensorrt_quantized(model, hf_quant_config): continue if hf_quant_algo == "FP8": - # FP8 E4M3 format has a maximum representable value of 448.0 # Scale the quantization parameters accordingly weight_scale = tensors.pop(weight_scale_name) - weight_amax = weight_scale * 448.0 - input_amax = tensors.pop(input_scale_name) * 448.0 + weight_amax = weight_scale * MAX_BOUND_FP8 + input_amax = tensors.pop(input_scale_name) * MAX_BOUND_FP8 # Dequantize the weight using the scale factor - dequantized_weight_data = module.weight.to(torch.float32) * weight_scale + dequantized_weight_data = module.weight.to(weight_dtype) * weight_scale # Configure quantizer for FP8 format (4 exponent bits, 3 mantissa bits) quantizer_attribute_config = QuantizerAttributeConfig( @@ -218,15 +228,15 @@ def convert_linear_to_tensorrt_quantized(model, hf_quant_config): weight_scale2 = tensors.pop(weight_scale2_name) # Calculate amax values with additional scaling factor for NVFP4 - input_amax = input_scale * 448.0 * 6.0 - weight_amax = weight_scale2 * 448.0 * 6.0 + input_amax = input_scale * MAX_BOUND_FP8 * MAX_BOUND_NVFP4 + weight_amax = weight_scale2 * MAX_BOUND_FP8 * MAX_BOUND_NVFP4 # Handle NVFP4 tensor format weight_data = tensors.pop(weight_name) original_shape = list(weight_data.shape) original_shape[-1] *= 2 # NVFP4 packs 2 values per element nvfp4_tensor = NVFP4QTensor( - torch.Size(original_shape), torch.float32, weight_data + torch.Size(original_shape), weight_dtype, weight_data ) # Dequantize using both scales and block size configuration diff --git a/tools/llm/run_llm.py b/tools/llm/run_llm.py index feced87ad0..21776d8674 100644 --- a/tools/llm/run_llm.py +++ b/tools/llm/run_llm.py @@ -71,12 +71,20 @@ def get_model(args): hf_quant_config = load_quantization_config(args.model) if hf_quant_config: - model = convert_linear_to_tensorrt_quantized(model, hf_quant_config).cuda() - print(f"Model converted to TensorRT quantized") + model = convert_linear_to_tensorrt_quantized( + model, args.model_precision, hf_quant_config + ).cuda() + print( + f"Model is {hf_quant_config['quant_algo']} pre-quantized hf model. 
Quantized linear layers are applied" + ) + if args.quant_format: + raise RuntimeError( + f"Quantization cannot be applied for pre-quantized hf model" + ) - if args.precision == "FP16": + if args.model_precision == "FP16": model = model.to(torch.float16) - elif args.precision == "BF16": + elif args.model_precision == "BF16": model = model.to(torch.bfloat16) else: model = model.to(torch.float32) @@ -112,11 +120,11 @@ def compile_torchtrt(model, input_ids, args): # Set precision specific flags use_fp32_acc = False use_explicit_typing = False - if args.precision == "FP16": + if args.model_precision == "FP16": enabled_precisions = {torch.float32} use_fp32_acc = True use_explicit_typing = True - elif args.precision == "BF16": + elif args.model_precision == "BF16": enabled_precisions = {torch.bfloat16} use_fp32_acc = False else: @@ -204,7 +212,7 @@ def measure_perf(trt_model, input_signature, backend_name): "--prompt", type=str, default="What is parallel programming ?", help="Prompt" ) arg_parser.add_argument( - "--precision", + "--model_precision", type=str, default="FP16", help="Precision to use in the model. Options: FP16, BF16, FP32", @@ -299,7 +307,7 @@ def measure_perf(trt_model, input_signature, backend_name): pyt_stats = record_stats( "PyTorch", pyt_timings, - args.precision, + args.model_precision, batch_size=args.batch_size, compile_time_s=None, ) @@ -357,7 +365,7 @@ def measure_perf(trt_model, input_signature, backend_name): trt_stats = record_stats( "TensorRT", trt_timings, - args.precision, + args.model_precision, batch_size=args.batch_size, compile_time_s=None, ) diff --git a/tools/llm/utils.py b/tools/llm/utils.py index b9e3506f4b..7d9507321e 100644 --- a/tools/llm/utils.py +++ b/tools/llm/utils.py @@ -247,7 +247,7 @@ def record_stats(backend, timings, precision, batch_size=1, compile_time_s=None) stats = { "Backend": backend, - "Precision": precision, + "Model Precision": precision, "Batch size": batch_size, "Median(FPS)": speed_med, "Mean(FPS)": speed_mean, From 83c5df7300e8b1a535cfbd5a8a44966d2f72891a Mon Sep 17 00:00:00 2001 From: kee hyun an Date: Mon, 22 Sep 2025 06:26:52 +0000 Subject: [PATCH 5/6] chore: api change in modelopt 0.35 --- tools/llm/quantize_utils.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tools/llm/quantize_utils.py b/tools/llm/quantize_utils.py index 8c6076447a..605931c003 100644 --- a/tools/llm/quantize_utils.py +++ b/tools/llm/quantize_utils.py @@ -15,9 +15,9 @@ except Exception: logger.warning("Unable to import quantization op. 
Please install modelopt library") -from modelopt.core.torch.quantization.qtensor.nvfp4_tensor import NVFP4QTensor from modelopt.torch.quantization.config import QuantizerAttributeConfig from modelopt.torch.quantization.nn.modules.tensor_quantizer import TensorQuantizer +from modelopt.torch.quantization.qtensor.nvfp4_tensor import NVFP4QTensor from modelopt.torch.utils.dataset_utils import ( create_forward_loop, get_dataset_dataloader, From 95f8e5bd6af2ea7aa282fa2bf2d16120fdf057b9 Mon Sep 17 00:00:00 2001 From: kee hyun an Date: Thu, 2 Oct 2025 01:23:56 +0000 Subject: [PATCH 6/6] chore: precision -> model_precision option --- tests/py/dynamo/llm/test_llm_models.py | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/tests/py/dynamo/llm/test_llm_models.py b/tests/py/dynamo/llm/test_llm_models.py index 73811572f9..3899d5dd93 100644 --- a/tests/py/dynamo/llm/test_llm_models.py +++ b/tests/py/dynamo/llm/test_llm_models.py @@ -23,15 +23,15 @@ def test_llm_decoder_layer(precision): args.debug = False args.num_tokens = 128 args.model = "Qwen/Qwen2.5-0.5B-Instruct" - args.precision = precision + args.model_precision = precision args.min_block_size = 1 args.prompt = "What is parallel programming ?" - if args.precision == "FP16": + if args.model_precision == "FP16": dtype = torch.float16 - elif args.precision == "BF16": + elif args.model_precision == "BF16": dtype = torch.bfloat16 else: - args.precision = "FP32" + args.model_precision = "FP32" dtype = torch.float32 model = (