
Commit 7613999

heheda12345 authored and garg-amit committed
[Kernel] Support sliding window in flash attention backend (vllm-project#9403)
Signed-off-by: Amit Garg <mitgarg17495@gmail.com>
1 parent 9447417 commit 7613999

13 files changed: 41 additions, 61 deletions

tests/kernels/test_attention_selector.py

+15 -20

@@ -20,21 +20,21 @@ def test_env(name: str, device: str, monkeypatch):
 
     if device == "cpu":
         with patch("vllm.attention.selector.is_cpu", return_value=True):
-            backend = which_attn_to_use(16, None, torch.float16, torch.float16,
-                                        16, False)
+            backend = which_attn_to_use(16, torch.float16, torch.float16, 16,
+                                        False)
         assert backend.name == "TORCH_SDPA"
     elif device == "hip":
         with patch("vllm.attention.selector.is_hip", return_value=True):
-            backend = which_attn_to_use(16, None, torch.float16, torch.float16,
-                                        16, False)
+            backend = which_attn_to_use(16, torch.float16, torch.float16, 16,
+                                        False)
         assert backend.name == "ROCM_FLASH"
     elif device == "openvino":
         with patch("vllm.attention.selector.is_openvino", return_value=True):
-            backend = which_attn_to_use(16, None, torch.float16, torch.float16,
-                                        16, False)
+            backend = which_attn_to_use(16, torch.float16, torch.float16, 16,
+                                        False)
         assert backend.name == "OPENVINO"
     else:
-        backend = which_attn_to_use(16, None, torch.float16, torch.float16, 16,
+        backend = which_attn_to_use(16, torch.float16, torch.float16, 16,
                                     False)
     assert backend.name == name
 

@@ -46,42 +46,37 @@ def test_flash_attn(monkeypatch):
 
     # Unsupported CUDA arch
     with patch("torch.cuda.get_device_capability", return_value=(7, 5)):
-        backend = which_attn_to_use(16, None, torch.float16, None, 16, False)
+        backend = which_attn_to_use(16, torch.float16, None, 16, False)
     assert backend.name != STR_FLASH_ATTN_VAL
 
     # Unsupported data type
-    backend = which_attn_to_use(16, None, torch.float8_e4m3fn, None, 16, False)
+    backend = which_attn_to_use(16, torch.float8_e4m3fn, None, 16, False)
     assert backend.name != STR_FLASH_ATTN_VAL
 
     # Unsupported kv cache data type
-    backend = which_attn_to_use(16, None, torch.float16, "fp8", 16, False)
+    backend = which_attn_to_use(16, torch.float16, "fp8", 16, False)
     assert backend.name != STR_FLASH_ATTN_VAL
 
     # Unsupported block size
-    backend = which_attn_to_use(16, None, torch.float16, None, 8, False)
-    assert backend.name != STR_FLASH_ATTN_VAL
-
-    # Unsupported sliding window
-    backend = which_attn_to_use(16, 1, torch.float16, None, 16, False)
+    backend = which_attn_to_use(16, torch.float16, None, 8, False)
     assert backend.name != STR_FLASH_ATTN_VAL
 
     # flash-attn is not installed
     with patch.dict('sys.modules', {'vllm_flash_attn': None}):
-        backend = which_attn_to_use(16, None, torch.float16, None, 16, False)
+        backend = which_attn_to_use(16, torch.float16, None, 16, False)
     assert backend.name != STR_FLASH_ATTN_VAL
 
     # Unsupported head size
-    backend = which_attn_to_use(17, None, torch.float16, None, 16, False)
+    backend = which_attn_to_use(17, torch.float16, None, 16, False)
     assert backend.name != STR_FLASH_ATTN_VAL
 
     # Attention-free models should bypass env and use PlaceholderAttention
-    backend = which_attn_to_use(16, None, torch.float16, torch.float16, 16,
-                                True)
+    backend = which_attn_to_use(16, torch.float16, torch.float16, 16, True)
     assert backend.name != STR_FLASH_ATTN_VAL
 
 
 def test_invalid_env(monkeypatch):
     """Throw an exception if the backend name is invalid."""
     override_backend_env_variable(monkeypatch, STR_INVALID_VAL)
     with pytest.raises(ValueError):
-        which_attn_to_use(16, None, torch.float16, None, 16, False)
+        which_attn_to_use(16, torch.float16, None, 16, False)
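
Note: the shape of the change in these tests is that which_attn_to_use loses its second positional parameter (sliding_window), so the remaining arguments shift left. A minimal before/after sketch, assuming the positional order (head_size, dtype, kv_cache_dtype, block_size, is_attention_free) shown in vllm/attention/selector.py below:

import torch

from vllm.attention.selector import which_attn_to_use

# Old order: (head_size, sliding_window, dtype, kv_cache_dtype, block_size,
#             is_attention_free)
# backend = which_attn_to_use(16, None, torch.float16, None, 16, False)

# New order: (head_size, dtype, kv_cache_dtype, block_size, is_attention_free)
backend = which_attn_to_use(16, torch.float16, None, 16, False)
print(backend.name)  # backend enum member; the value depends on your hardware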

tests/kernels/test_flash_attn.py

+16 -13

@@ -78,6 +78,7 @@ def ref_paged_attn(
 @pytest.mark.parametrize("dtype", DTYPES)
 @pytest.mark.parametrize("soft_cap", [None, 10.0, 50.0])
 @pytest.mark.parametrize("num_blocks", NUM_BLOCKS)
+@pytest.mark.parametrize("sliding_window", [None, 256])
 @torch.inference_mode()
 def test_flash_attn_with_paged_kv(
     kv_lens: List[int],

@@ -87,6 +88,7 @@ def test_flash_attn_with_paged_kv(
     block_size: int,
     soft_cap: Optional[float],
     num_blocks: int,
+    sliding_window: Optional[int],
 ) -> None:
     torch.set_default_device("cuda")
     seed_everything(0)

@@ -96,6 +98,8 @@ def test_flash_attn_with_paged_kv(
     assert num_query_heads % num_kv_heads == 0
     max_kv_len = max(kv_lens)
     scale = head_size**-0.5
+    window_size = ((sliding_window - 1, 0) if sliding_window is not None else
+                   (-1, -1))
 
     query = torch.randn(num_seqs, num_query_heads, head_size, dtype=dtype)
     key_cache = torch.randn(num_blocks,

@@ -121,18 +125,18 @@ def test_flash_attn_with_paged_kv(
         block_table=block_tables,
         cache_seqlens=kv_lens_tensor,
         softcap=soft_cap if soft_cap is not None else 0,
+        window_size=window_size,
     ).squeeze(1)
 
-    ref_output = ref_paged_attn(
-        query=query,
-        key_cache=key_cache,
-        value_cache=value_cache,
-        query_lens=[1] * num_seqs,
-        kv_lens=kv_lens,
-        block_tables=block_tables,
-        scale=scale,
-        soft_cap=soft_cap,
-    )
+    ref_output = ref_paged_attn(query=query,
+                                key_cache=key_cache,
+                                value_cache=value_cache,
+                                query_lens=[1] * num_seqs,
+                                kv_lens=kv_lens,
+                                block_tables=block_tables,
+                                scale=scale,
+                                soft_cap=soft_cap,
+                                sliding_window=sliding_window)
     torch.testing.assert_close(output, ref_output, atol=2e-2, rtol=1e-2), \
         f"{torch.max(torch.abs(output - ref_output))}"
 

@@ -141,7 +145,7 @@ def test_flash_attn_with_paged_kv(
 @pytest.mark.parametrize("num_heads", NUM_HEADS)
 @pytest.mark.parametrize("head_size", HEAD_SIZES)
 @pytest.mark.parametrize("block_size", BLOCK_SIZES)
-@pytest.mark.parametrize("sliding_window", [None])
+@pytest.mark.parametrize("sliding_window", [None, 256])
 @pytest.mark.parametrize("dtype", DTYPES)
 @pytest.mark.parametrize("soft_cap", [None, 10.0, 50.0])
 @pytest.mark.parametrize("num_blocks", NUM_BLOCKS)

@@ -166,8 +170,7 @@ def test_varlen_with_paged_kv(
     assert num_query_heads % num_kv_heads == 0
     max_query_len = max(query_lens)
     max_kv_len = max(kv_lens)
-    window_size = ((sliding_window,
-                    sliding_window) if sliding_window is not None else
+    window_size = ((sliding_window - 1, 0) if sliding_window is not None else
                    (-1, -1))
     scale = head_size**-0.5
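
Note: the switch from (sliding_window, sliding_window) to (sliding_window - 1, 0) follows flash-attn's (left, right) window convention for causal attention: each query may look back sliding_window - 1 positions and never ahead, so it attends to exactly sliding_window tokens including itself. A small illustrative helper (not part of the diff) that builds the equivalent boolean mask:

import torch

def sliding_window_causal_mask(seq_len: int, sliding_window: int) -> torch.Tensor:
    # True marks key positions a query may attend to, mirroring
    # window_size=(sliding_window - 1, 0): query i sees keys j with
    # i - (sliding_window - 1) <= j <= i.
    q = torch.arange(seq_len).unsqueeze(1)  # query positions (column)
    k = torch.arange(seq_len).unsqueeze(0)  # key positions (row)
    return (k <= q) & ((q - k) <= sliding_window - 1)

# With sliding_window=4, query position 5 attends to keys 2..5 only.
print(sliding_window_causal_mask(8, 4)[5].int().tolist())
# [0, 0, 1, 1, 1, 1, 0, 0]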

vllm/attention/backends/flash_attn.py

+5 -8

@@ -536,8 +536,8 @@ def __init__(
         if alibi_slopes is not None:
             alibi_slopes = torch.tensor(alibi_slopes, dtype=torch.float32)
         self.alibi_slopes = alibi_slopes
-        self.sliding_window = ((sliding_window, sliding_window)
-                               if sliding_window is not None else (-1, -1))
+        self.sliding_window = ((sliding_window - 1,
+                                0) if sliding_window is not None else (-1, -1))
         self.kv_cache_dtype = kv_cache_dtype
         if logits_soft_cap is None:
             # In flash-attn, setting logits_soft_cap as 0 means no soft cap.

@@ -547,12 +547,6 @@ def __init__(
         assert self.num_heads % self.num_kv_heads == 0
         self.num_queries_per_kv = self.num_heads // self.num_kv_heads
 
-        if sliding_window is not None:
-            # NOTE(woosuk): flash-attn's sliding window does not work with
-            # paged KV cache.
-            raise ValueError(
-                "Sliding window is not supported in FlashAttention.")
-
         support_head_sizes = FlashAttentionBackend.get_supported_head_sizes()
         if head_size not in support_head_sizes:
             raise ValueError(

@@ -716,6 +710,7 @@ def unified_flash_attention(
             max_seqlen_k=max_seq_len,
             softmax_scale=softmax_scale,
             causal=True,
+            window_size=window_size,
             alibi_slopes=alibi_slopes,
             block_table=prefill_meta.block_tables,
             softcap=logits_soft_cap,

@@ -737,6 +732,7 @@ def unified_flash_attention(
             max_seqlen_k=decode_meta.max_decode_seq_len,
             softmax_scale=softmax_scale,
             causal=True,
+            window_size=window_size,
             alibi_slopes=alibi_slopes,
             softcap=logits_soft_cap,
             block_table=decode_meta.block_tables,

@@ -751,6 +747,7 @@ def unified_flash_attention(
             cache_seqlens=decode_meta.seq_lens_tensor,
             softmax_scale=softmax_scale,
             causal=True,
+            window_size=window_size,
             alibi_slopes=alibi_slopes,
             softcap=logits_soft_cap,
         ).squeeze(1)
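
Note: with the hard error removed, the backend derives a single (left, right) window tuple in __init__ and forwards it as window_size= to the prefill and decode kernel calls above. A minimal sketch of just that derivation (make_window_size is an illustrative standalone helper; the real code stores the tuple as self.sliding_window):

from typing import Optional, Tuple

def make_window_size(sliding_window: Optional[int]) -> Tuple[int, int]:
    # (-1, -1) disables windowing in flash-attn; otherwise look back
    # sliding_window - 1 tokens and 0 tokens ahead (causal).
    return (sliding_window - 1, 0) if sliding_window is not None else (-1, -1)

assert make_window_size(None) == (-1, -1)
assert make_window_size(4096) == (4095, 0)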

vllm/attention/layer.py

+3 -4

@@ -78,10 +78,9 @@ def __init__(
         # During model initialization, the default dtype is set as the model
         # weight and activation dtype.
         dtype = torch.get_default_dtype()
-        attn_backend = get_attn_backend(head_size, sliding_window, dtype,
-                                        kv_cache_dtype, block_size,
-                                        is_attention_free, blocksparse_params
-                                        is not None)
+        attn_backend = get_attn_backend(head_size, dtype, kv_cache_dtype,
+                                        block_size, is_attention_free,
+                                        blocksparse_params is not None)
         impl_cls = attn_backend.get_impl_cls()
         self.impl = impl_cls(num_heads, head_size, scale, num_kv_heads,
                              alibi_slopes, sliding_window, kv_cache_dtype,

vllm/attention/selector.py

+2 -8

@@ -90,7 +90,6 @@ def get_global_forced_attn_backend() -> Optional[_Backend]:
 @lru_cache(maxsize=None)
 def get_attn_backend(
     head_size: int,
-    sliding_window: Optional[int],
     dtype: torch.dtype,
     kv_cache_dtype: Optional[str],
     block_size: int,

@@ -105,8 +104,8 @@ def get_attn_backend(
             BlocksparseFlashAttentionBackend)
         return BlocksparseFlashAttentionBackend
 
-    backend = which_attn_to_use(head_size, sliding_window, dtype,
-                                kv_cache_dtype, block_size, is_attention_free)
+    backend = which_attn_to_use(head_size, dtype, kv_cache_dtype, block_size,
+                                is_attention_free)
     if backend == _Backend.FLASH_ATTN:
         from vllm.attention.backends.flash_attn import (  # noqa: F401
             FlashAttentionBackend)

@@ -155,7 +154,6 @@ def get_attn_backend(
 
 def which_attn_to_use(
     head_size: int,
-    sliding_window: Optional[int],
     dtype: torch.dtype,
     kv_cache_dtype: Optional[str],
     block_size: int,

@@ -243,10 +241,6 @@ def which_attn_to_use(
                 "Cannot use FlashAttention-2 backend for block size not "
                 "divisible by 16.")
             selected_backend = _Backend.XFORMERS
-        elif sliding_window is not None:
-            logger.info(
-                "Cannot use FlashAttention-2 backend due to sliding window.")
-            selected_backend = _Backend.XFORMERS
 
     # FlashAttn is valid for the model, checking if the package is installed.
     if selected_backend == _Backend.FLASH_ATTN:
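
Note: because get_attn_backend is wrapped in @lru_cache(maxsize=None), dropping sliding_window also removes it from the cache key, and sliding-window models no longer get forced onto XFORMERS. A hedged usage sketch of the trimmed signature (argument order as in the diff; the final flag mirrors the blocksparse_params is not None argument passed from vllm/attention/layer.py):

import torch

from vllm.attention.selector import get_attn_backend

# (head_size, dtype, kv_cache_dtype, block_size, is_attention_free, blocksparse)
attn_backend = get_attn_backend(128, torch.float16, None, 16, False, False)
impl_cls = attn_backend.get_impl_cls()  # backend-specific impl class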

vllm/worker/cache_engine.py

-1

@@ -53,7 +53,6 @@ def __init__(
 
         # Get attention backend.
         self.attn_backend = get_attn_backend(self.head_size,
-                                             model_config.get_sliding_window(),
                                              model_config.dtype,
                                              cache_config.cache_dtype,
                                              self.block_size,

vllm/worker/cpu_model_runner.py

-1

@@ -436,7 +436,6 @@ def __init__(
         self.block_size = cache_config.block_size
         self.attn_backend = get_attn_backend(
             self.model_config.get_head_size(),
-            self.model_config.get_sliding_window(),
             self.model_config.dtype,
             self.kv_cache_dtype,
             self.block_size,

vllm/worker/cpu_worker.py

-1

@@ -57,7 +57,6 @@ def __init__(self, cache_config: CacheConfig, model_config: ModelConfig,
         # Get attention backend.
         self.attn_backend = get_attn_backend(
             self.model_config.get_head_size(),
-            self.model_config.get_sliding_window(),
             self.model_config.dtype,
             cache_config.cache_dtype,
             self.block_size,

vllm/worker/model_runner.py

-1

@@ -1038,7 +1038,6 @@ def __init__(
 
         self.attn_backend = get_attn_backend(
             self.model_config.get_head_size(),
-            self.model_config.get_sliding_window(),
             self.model_config.dtype,
             self.kv_cache_dtype,
             self.block_size,

vllm/worker/openvino_model_runner.py

-1

@@ -75,7 +75,6 @@ def __init__(
 
         self.attn_backend = get_attn_backend(
             self.model_config.get_head_size(),
-            self.model_config.get_sliding_window(),
             self.model_config.dtype,
             self.kv_cache_dtype,
             self.block_size,

vllm/worker/openvino_worker.py

-1

@@ -71,7 +71,6 @@ def __init__(
         # Get attention backend.
         self.attn_backend = get_attn_backend(
             self.head_size,
-            self.model_config.get_sliding_window(),
             self.model_config.dtype,
             self.cache_config.cache_dtype,
             self.block_size,

vllm/worker/tpu_model_runner.py

-1

@@ -114,7 +114,6 @@ def __init__(
             dtype=np.int32)
         self.attn_backend = get_attn_backend(
             self.model_config.get_head_size(),
-            self.model_config.get_sliding_window(),
             self.model_config.dtype,
             self.cache_config.cache_dtype,
             self.block_size,

vllm/worker/xpu_model_runner.py

-1

@@ -390,7 +390,6 @@ def __init__(
 
         self.attn_backend = get_attn_backend(
             self.model_config.get_head_size(),
-            self.model_config.get_sliding_window(),
             self.model_config.dtype,
             self.kv_cache_dtype,
             self.block_size,
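
Note: end to end, a model whose config specifies a sliding window can now run on the flash-attn backend instead of being forced onto XFORMERS. A hedged usage sketch (the model name is illustrative; actual backend choice still depends on GPU capability, dtype, block size, and whether vllm_flash_attn is installed):

import os

# Optional: force the backend explicitly; otherwise the selector picks one.
os.environ["VLLM_ATTENTION_BACKEND"] = "FLASH_ATTN"

from vllm import LLM, SamplingParams

# Illustrative sliding-window model; any model with a sliding_window config applies.
llm = LLM(model="mistralai/Mistral-7B-Instruct-v0.1")
out = llm.generate(["Hello"], SamplingParams(max_tokens=8))
print(out[0].outputs[0].text)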
