fix FastAttention has no __flops__
YangYangGirl committed Aug 2, 2021
1 parent e6d881b commit 8421017
Showing 1 changed file with 2 additions and 1 deletion.
src/models/layers/performer.py: 2 additions & 1 deletion
@@ -138,6 +138,7 @@ def __init__(self, dim_heads, nb_features=None, ortho_scaling=0,
         # if this is turned on, no projection will be used
         # queries and keys will be softmax-ed as in the original efficient attention paper
         self.no_projection = no_projection
+        self.__flops__ = 0
 
     @torch.no_grad()
     def redraw_projection_matrix(self, device):
@@ -235,4 +236,4 @@ def forward(self, x, nx=None, ny=None):
         x = rearrange(x, 'b h n d -> b n (h d)')
         x = self.proj(x)
         x = self.proj_drop(x)
-        return x
+        return x
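For context, the added self.__flops__ = 0 gives FastAttention the per-module counter that FLOPs-profiling utilities expect to find. A minimal sketch of that pattern, assuming a ptflops-style counter whose accumulation step sums a __flops__ attribute over all submodules (the sum_module_flops helper below is illustrative and not part of this repository):

    import torch.nn as nn

    def sum_module_flops(model: nn.Module) -> int:
        # Hypothetical accumulation step of a ptflops-style FLOPs counter:
        # every module is expected to carry a __flops__ field that profiling
        # hooks update during a forward pass.
        total = 0
        for module in model.modules():
            # A custom layer such as FastAttention that never defines the
            # attribute makes this lookup raise AttributeError; initializing
            # self.__flops__ = 0 in __init__ (this commit) keeps the
            # traversal safe even when no hook touches the module.
            total += module.__flops__
        return total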
