flash.py
import time

import torch
import torch.nn.functional as F

# Benchmark configuration.
bz = 32          # batch size
seq_len = 2048   # sequence length
dims = 512       # model width
n_heads = 8      # number of attention heads (head dim = dims // n_heads = 64)
# Query, key, and value tensors in the (batch, heads, seq, head_dim) layout
# expected by scaled_dot_product_attention.
q = torch.randn(bz, n_heads, seq_len, dims // n_heads, dtype=torch.bfloat16).cuda()
k = torch.randn(bz, n_heads, seq_len, dims // n_heads, dtype=torch.bfloat16).cuda()
v = torch.randn(bz, n_heads, seq_len, dims // n_heads, dtype=torch.bfloat16).cuda()

dropout_rate = 0.2
num_trials = 10
# --- Standard (unfused) attention ---------------------------------------------
torch.cuda.synchronize()
start = time.time()
for i in range(num_trials):
    # Scale by 1/sqrt(head_dim) so the math matches scaled_dot_product_attention.
    attn = (q @ k.transpose(-2, -1)) * (q.size(-1) ** -0.5)
    attn = attn.softmax(dim=-1)
    attn = F.dropout(attn, p=dropout_rate, training=True)
    x = (attn @ v).transpose(1, 2)  # .reshape(bz, seq_len, dims)
torch.cuda.synchronize()
end = time.time()
print('Standard attention took {} seconds for {} trials'.format(end - start, num_trials))
# --- FlashAttention via the fused SDPA kernel ----------------------------------
with torch.backends.cuda.sdp_kernel(
    enable_flash=True, enable_math=False, enable_mem_efficient=False
):
    torch.cuda.synchronize()
    start = time.time()
    for i in range(num_trials):
        out = F.scaled_dot_product_attention(q, k, v, dropout_p=dropout_rate)
    torch.cuda.synchronize()
    end = time.time()
    print('Flash attention took {} seconds for {} trials'.format(end - start, num_trials))
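
# Optional sanity check -- a minimal sketch, not part of the original benchmark.
# With dropout disabled, the unfused math above and the fused SDPA kernel should
# agree numerically; the atol/rtol values below are loose assumptions for bfloat16.
ref = ((q @ k.transpose(-2, -1)) * (q.size(-1) ** -0.5)).softmax(dim=-1) @ v
out_nodrop = F.scaled_dot_product_attention(q, k, v, dropout_p=0.0)
print('Outputs match with dropout off:',
      torch.allclose(ref, out_nodrop, atol=1e-2, rtol=1e-2))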