Fix MLA and logic for using triton scaled_mm on ROCm as blockwise FP8 quant is not supported
mawong-amd authored and hongxiayang committed Feb 2, 2025
1 parent 3194039 commit 0c1cc40
Showing 3 changed files with 21 additions and 1 deletion.
5 changes: 4 additions & 1 deletion vllm/attention/backends/mla/utils.py
@@ -25,7 +25,10 @@
 from vllm.model_executor.layers.quantization.utils.quant_utils import (
     scaled_dequantize, scaled_quantize)
 from vllm.model_executor.layers.rotary_embedding import RotaryEmbedding
-from vllm.vllm_flash_attn import flash_attn_varlen_func
+try:
+    from vllm.vllm_flash_attn import flash_attn_varlen_func
+except ImportError:
+    from flash_attn import flash_attn_varlen_func
 
 
 @dataclass
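Note on this hunk: the bundled vllm.vllm_flash_attn extension may be unavailable (e.g. on ROCm builds), so the import now falls back to the standalone flash_attn package. A minimal sketch (assuming at least one of the two packages is installed; the impl variable is illustrative, not part of the commit) to confirm which implementation the fallback resolved to:

    try:
        from vllm.vllm_flash_attn import flash_attn_varlen_func
        impl = "bundled vllm.vllm_flash_attn"
    except ImportError:
        # On builds where the bundled extension is absent (such as ROCm),
        # the standalone flash-attn package provides the kernel instead.
        from flash_attn import flash_attn_varlen_func
        impl = "standalone flash_attn"

    print(f"flash_attn_varlen_func provided by: {impl}")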
11 changes: 11 additions & 0 deletions vllm/model_executor/layers/quantization/utils/fp8_utils.py
@@ -45,6 +45,17 @@ def apply_w8a8_block_fp8_linear(
 
     shape_supported_by_cutlass = (weight.shape[0] % 128 == 0
                                   and weight.shape[1] % 128 == 0)
+    if current_platform.is_rocm():
+        scale_a_shape = ((input_2d.shape[-1] // block_size[1], )
+                         + input_2d.shape[:-1])[::-1]
+        scale_b_shape = (weight_scale.view(-1, 1) if weight_scale.dim() <= 1
+                         else weight_scale.T).shape
+        ar, ac = scale_a_shape
+        br, bc = scale_b_shape
+        if (ac > 1 or bc > 1 or
+                ar not in (1, input_2d.shape[0]) or
+                br not in (1, weight.shape[0])):
+            shape_supported_by_cutlass = False
 if cutlass_block_fp8_supported and shape_supported_by_cutlass:
     q_input, x_scale = per_token_group_quant_fp8(input_2d,
                                                  block_size[1],
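For illustration, a self-contained sketch of the new shape test (the helper name rocm_scaled_mm_shape_ok is hypothetical, not part of the commit). Blockwise FP8 quantization produces more than one scale column per operand, so on ROCm the check flips shape_supported_by_cutlass to False and apply_w8a8_block_fp8_linear falls back to the Triton kernel:

    import torch

    def rocm_scaled_mm_shape_ok(input_2d: torch.Tensor, weight: torch.Tensor,
                                weight_scale: torch.Tensor,
                                block_size: list[int]) -> bool:
        # Mirrors the check above: only scale layouts with a single column
        # and a row count of 1 or the full dimension (i.e. per-tensor or
        # per-row scales) pass.
        scale_a_shape = ((input_2d.shape[-1] // block_size[1], )
                         + input_2d.shape[:-1])[::-1]
        scale_b_shape = (weight_scale.view(-1, 1) if weight_scale.dim() <= 1
                         else weight_scale.T).shape
        ar, ac = scale_a_shape
        br, bc = scale_b_shape
        return not (ac > 1 or bc > 1 or
                    ar not in (1, input_2d.shape[0]) or
                    br not in (1, weight.shape[0]))

    # A 512x512 weight quantized with 128x128 blocks: both operands end up
    # with 4 scale columns, so the scaled_mm path is rejected.
    x = torch.randn(16, 512)
    w = torch.randn(512, 512)
    w_scale = torch.ones(4, 4)  # (N // 128, K // 128) blockwise scales
    print(rocm_scaled_mm_shape_ok(x, w, w_scale, [128, 128]))  # False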
6 changes: 6 additions & 0 deletions vllm/platforms/rocm.py
@@ -77,6 +77,12 @@ class RocmPlatform(Platform):
     def get_attn_backend_cls(cls, selected_backend, head_size, dtype,
                              kv_cache_dtype, block_size, use_v1,
                              use_mla) -> str:
+        if use_mla:
+            if selected_backend and selected_backend != _Backend.TRITON_MLA:
+                logger.warning(f"Cannot use {selected_backend.name} "
+                               "backend for MLA.")
+            logger.info("Using Triton MLA backend.")
+            return "vllm.attention.backends.triton_mla.TritonMLABackend"
         selected_backend = (_Backend.ROCM_FLASH if selected_backend
                             == _Backend.FLASH_ATTN else selected_backend)
         if selected_backend == _Backend.ROCM_FLASH:
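A hedged usage sketch of the new MLA branch (assumes a ROCm vLLM installation and that the method is exposed as a classmethod on RocmPlatform; the argument values are illustrative): with use_mla=True the method short-circuits to the Triton MLA backend regardless of the requested backend, warning if a different one was selected.

    from vllm.platforms.rocm import RocmPlatform

    # selected_backend=None, so no warning is emitted; the branch logs
    # "Using Triton MLA backend." and returns the backend class path.
    backend_cls = RocmPlatform.get_attn_backend_cls(
        selected_backend=None, head_size=576, dtype=None,
        kv_cache_dtype="auto", block_size=16, use_v1=False, use_mla=True)
    assert backend_cls == "vllm.attention.backends.triton_mla.TritonMLABackend"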
