From df69ff21d06f32c81166edfad17cbb1fe99be4b7 Mon Sep 17 00:00:00 2001
From: Wenjie Du
Date: Tue, 24 Sep 2024 15:02:51 +0800
Subject: [PATCH] refactor: replace deprecated torch.cuda.amp.autocast;

---
 pypots/nn/modules/reformer/local_attention.py | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)

diff --git a/pypots/nn/modules/reformer/local_attention.py b/pypots/nn/modules/reformer/local_attention.py
index a617b9ba..f84e6e56 100644
--- a/pypots/nn/modules/reformer/local_attention.py
+++ b/pypots/nn/modules/reformer/local_attention.py
@@ -13,7 +13,7 @@
 from einops import rearrange
 from einops import repeat, pack, unpack
 from torch import nn, einsum
-from torch.cuda.amp import autocast
+from torch.amp import autocast
 
 TOKEN_SELF_ATTN_VALUE = -5e4
 
@@ -28,7 +28,7 @@ def rotate_half(x):
     return torch.cat((-x2, x1), dim=-1)
 
 
-@autocast(enabled=False)
+@autocast("cuda", enabled=False)
 def apply_rotary_pos_emb(q, k, freqs, scale=1):
     q_len = q.shape[-2]
     q_freqs = freqs[..., -q_len:, :]
@@ -95,7 +95,7 @@ def __init__(self, dim, scale_base=None, use_xpos=False):
         scale = (torch.arange(0, dim, 2) + 0.4 * dim) / (1.4 * dim)
         self.register_buffer("scale", scale, persistent=False)
 
-    @autocast(enabled=False)
+    @autocast("cuda", enabled=False)
     def forward(self, x):
         seq_len, device = x.shape[-2], x.device
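
For context: torch.cuda.amp.autocast was deprecated in PyTorch 2.4 in favor of the device-agnostic torch.amp.autocast, which takes the device type as its first positional argument. Below is a minimal sketch of the migration this patch applies; the function and its body are illustrative only, not code from PyPOTS:

    import torch
    from torch.amp import autocast  # was: from torch.cuda.amp import autocast

    # Old, deprecated form:
    #   @torch.cuda.amp.autocast(enabled=False)
    # New form names the device type explicitly:
    @autocast("cuda", enabled=False)
    def attention_scores(q, k):
        # autocast is disabled inside this function, so the einsum
        # keeps the inputs' original dtype instead of being downcast
        return torch.einsum("bid,bjd->bij", q, k)

Behavior is unchanged; only the import path and the extra "cuda" argument differ.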