From 4896d0c2dd367efbdf8387028322bf5f74359930 Mon Sep 17 00:00:00 2001
From: Kyle Sayers
Date: Tue, 4 Feb 2025 02:27:11 -0500
Subject: [PATCH] [Quant] Fix use_mla TypeError and support loading
 pure-sparsity Compressed Tensors configs (#12711)

---
 vllm/config.py                                             | 5 +++--
 .../quantization/compressed_tensors/compressed_tensors.py | 5 +++++
 2 files changed, 8 insertions(+), 2 deletions(-)

diff --git a/vllm/config.py b/vllm/config.py
index 2f4a7ad769d98..bc4bf627b8e74 100644
--- a/vllm/config.py
+++ b/vllm/config.py
@@ -1000,8 +1000,9 @@ def use_mla(self) -> bool:
         # have fp8 for both weights and activations.
         if self.quantization == "compressed-tensors":
             quant_config = self._parse_quant_hf_config()
-            for group_name, cfg in quant_config.get("config_groups",
-                                                    ("", {})).items():
+            for group_name, cfg in quant_config.get("config_groups", {
+                    "": {}
+            }).items():
                 act_cfg = cfg.get("input_activations", {})
                 act_type = None if act_cfg is None else act_cfg.get("type", "")
                 w_cfg = cfg.get("weights", {})
diff --git a/vllm/model_executor/layers/quantization/compressed_tensors/compressed_tensors.py b/vllm/model_executor/layers/quantization/compressed_tensors/compressed_tensors.py
index 24f7542e12385..1a11b2419cc88 100644
--- a/vllm/model_executor/layers/quantization/compressed_tensors/compressed_tensors.py
+++ b/vllm/model_executor/layers/quantization/compressed_tensors/compressed_tensors.py
@@ -424,6 +424,11 @@ def get_scheme(self,
                                          or input_quant is not None,
                                          weight_quant=weight_quant,
                                          input_quant=input_quant)
+        elif weight_quant is None:
+            logger.warning_once("Acceleration for non-quantized schemes is "
+                                "not supported by Compressed Tensors. "
+                                "Falling back to UnquantizedLinearMethod")
+            return None
         else:
             # Find the quant_scheme
             scheme = self._get_scheme_from_parts(  # type: ignore
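
The first hunk fixes the fallback default passed to dict.get() in use_mla:
the old default ("", {}) is a tuple, which has no .items() method, so any
compressed-tensors config lacking a "config_groups" key (such as a
pure-sparsity config) crashed during iteration. A minimal standalone repro
of the failure and the fix follows; the quant_config value here is
illustrative, not a real vLLM config:

    # A config with no "config_groups" key, e.g. pure sparsity.
    quant_config = {}

    # Old default: a tuple. In plain Python this raises
    # AttributeError: 'tuple' object has no attribute 'items'.
    try:
        for group_name, cfg in quant_config.get("config_groups",
                                                ("", {})).items():
            pass
    except AttributeError as exc:
        print(exc)

    # New default: a dict with a single empty group, so .items() succeeds
    # and the loop body sees empty sub-configs (i.e. not fp8-quantized).
    for group_name, cfg in quant_config.get("config_groups", {"": {}}).items():
        act_cfg = cfg.get("input_activations", {})
        act_type = None if act_cfg is None else act_cfg.get("type", "")
        print(group_name, act_type)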
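
The second hunk makes get_scheme degrade gracefully for pure-sparsity
checkpoints: when a layer carries no weight quantization args at all, it now
warns once and returns None instead of falling through to
_get_scheme_from_parts, and the caller falls back to
UnquantizedLinearMethod, as the warning message states. A simplified sketch
of that contract; the function below is a stand-in, not the actual vLLM
signature:

    from typing import Optional

    def get_scheme(weight_quant: Optional[dict],
                   input_quant: Optional[dict]) -> Optional[str]:
        # Pure-sparsity config: sparsity metadata only, no quantization args.
        if weight_quant is None:
            print("Acceleration for non-quantized schemes is not supported "
                  "by Compressed Tensors. Falling back to "
                  "UnquantizedLinearMethod")
            # None tells the caller to use an unquantized linear method.
            return None
        return "quantized-scheme"  # stand-in for the real scheme object

    # A pure-sparsity checkpoint now loads instead of raising at init time.
    assert get_scheme(weight_quant=None, input_quant=None) is None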