
Commit

[Quant] Fix use_mla TypeError and support loading pure-sparsity Compressed Tensors configs (vllm-project#12711)

  • Loading branch information
kylesayrs authored Feb 4, 2025
1 parent bb392af commit 4896d0c
Showing 2 changed files with 8 additions and 2 deletions.
5 changes: 3 additions & 2 deletions vllm/config.py
@@ -1000,8 +1000,9 @@ def use_mla(self) -> bool:
         # have fp8 for both weights and activations.
         if self.quantization == "compressed-tensors":
             quant_config = self._parse_quant_hf_config()
-            for group_name, cfg in quant_config.get("config_groups",
-                                                    ("", {})).items():
+            for group_name, cfg in quant_config.get("config_groups", {
+                    "": {}
+            }).items():
                 act_cfg = cfg.get("input_activations", {})
                 act_type = None if act_cfg is None else act_cfg.get("type", "")
                 w_cfg = cfg.get("weights", {})
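For context on the first hunk: a pure-sparsity Compressed Tensors config carries no "config_groups" key, so dict.get returns the default itself. The old default was the tuple ("", {}), which has no .items() method, so iteration raised as soon as the config was loaded; the new default, the dict {"": {}}, iterates cleanly as a single empty group. A minimal standalone sketch (not vLLM code; the empty quant_config stands in for a parsed pure-sparsity config):

# Why a tuple default breaks and a dict default does not.
quant_config: dict = {}  # stand-in for a parsed pure-sparsity config

try:
    for group_name, cfg in quant_config.get("config_groups",
                                            ("", {})).items():
        pass
except (TypeError, AttributeError) as err:
    print(err)  # tuples have no .items(), so this raises

# The fixed default is a dict with a single empty group, so iteration
# succeeds and yields one ("", {}) pair.
for group_name, cfg in quant_config.get("config_groups", {"": {}}).items():
    act_cfg = cfg.get("input_activations", {})
    print(repr(group_name), act_cfg)  # '' {}

The empty group then flows through the fp8 checks below it with None/empty values, so use_mla no longer crashes on such configs.
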
5 changes: 5 additions & 0 deletions
@@ -424,6 +424,11 @@ def get_scheme(self,
                 or input_quant is not None,
                 weight_quant=weight_quant,
                 input_quant=input_quant)
+        elif weight_quant is None:
+            logger.warning_once("Acceleration for non-quantized schemes is "
+                                "not supported by Compressed Tensors. "
+                                "Falling back to UnquantizedLinearMethod")
+            return None
         else:
             # Find the quant_scheme
             scheme = self._get_scheme_from_parts(  # type: ignore
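The second hunk adds an elif branch so that get_scheme warns once and returns None when weight_quant is None, i.e. when the config describes sparsity but no weight quantization. Per the warning text, the caller then falls back to UnquantizedLinearMethod. A standalone sketch of that caller-side contract, with stand-in classes and a hypothetical pick_linear_method helper (the real dispatch lives elsewhere in vLLM):

from typing import Optional


class UnquantizedLinearMethod:
    """Stand-in for vLLM's plain, non-quantized linear method."""


class QuantizedLinearMethod:
    """Stand-in for a scheme-backed quantized linear method."""

    def __init__(self, scheme: object) -> None:
        self.scheme = scheme


def pick_linear_method(scheme: Optional[object]):
    # After this commit, get_scheme() returns None (after warning once)
    # when the config only describes sparsity. A None scheme means:
    # use the unquantized fallback path instead of erroring out.
    if scheme is None:
        return UnquantizedLinearMethod()
    return QuantizedLinearMethod(scheme)


print(type(pick_linear_method(None)).__name__)    # UnquantizedLinearMethod
print(type(pick_linear_method("w8a8")).__name__)  # QuantizedLinearMethod

This is what makes pure-sparsity configs loadable: they no longer need a quantization scheme to pass through the linear-layer setup.
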
