Skip to content

Commit 1d92ecb

Browse files
committed
expose memory efficient attention functions in __init__.py
1 parent a39265c commit 1d92ecb

File tree

2 files changed

+3
-3
lines changed

2 files changed

+3
-3
lines changed
memory_efficient_attention_pytorch/__init__.py

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -1,2 +1,2 @@
-from memory_efficient_attention_pytorch.memory_efficient_attention import Attention
-from memory_efficient_attention_pytorch.memory_efficient_cosine_sim_attention import CosineSimAttention
+from memory_efficient_attention_pytorch.memory_efficient_attention import Attention, memory_efficient_attention
+from memory_efficient_attention_pytorch.memory_efficient_cosine_sim_attention import CosineSimAttention, numerically_unstable_memory_efficient_attention

setup.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -3,7 +3,7 @@
 setup(
   name = 'memory-efficient-attention-pytorch',
   packages = find_packages(exclude=[]),
-  version = '0.0.10',
+  version = '0.0.11',
   license='MIT',
   description = 'Memory Efficient Attention - Pytorch',
   author = 'Phil Wang',

0 commit comments

Comments
 (0)