position_encoder.py
import torch


def positional_encoding(
    tensor, num_encoding_functions=6, include_input=True, log_sampling=True
) -> torch.Tensor:
r"""Apply positional encoding to the input.
Args:
tensor (torch.Tensor): Input tensor to be positionally encoded.
encoding_size (optional, int): Number of encoding functions used to compute
a positional encoding (default: 6).
include_input (optional, bool): Whether or not to include the input in the
positional encoding (default: True).
Returns:
(torch.Tensor): Positional encoding of the input tensor.
"""
    # The input tensor itself is prepended to the encoding if include_input=True.
    encoding = [tensor] if include_input else []
    if log_sampling:
        # Frequency bands spaced log-linearly: 2^0, 2^1, ..., 2^(N - 1).
        frequency_bands = 2.0 ** torch.linspace(
            0.0,
            num_encoding_functions - 1,
            num_encoding_functions,
            dtype=tensor.dtype,
            device=tensor.device,
        )
    else:
        # Frequency bands spaced linearly between 2^0 and 2^(N - 1).
        frequency_bands = torch.linspace(
            2.0 ** 0.0,
            2.0 ** (num_encoding_functions - 1),
            num_encoding_functions,
            dtype=tensor.dtype,
            device=tensor.device,
        )
    # Append sin and cos of the scaled input for every frequency band.
    for freq in frequency_bands:
        for func in [torch.sin, torch.cos]:
            encoding.append(func(tensor * freq))

    # Special case: no positional encoding was computed (include_input=True with
    # num_encoding_functions=0), so return the input tensor unchanged.
    if len(encoding) == 1:
        return encoding[0]
    else:
        return torch.cat(encoding, dim=-1)
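
# Illustrative note (not from the original file): for an input of shape (N, 3)
# with num_encoding_functions=6 and include_input=True, the output has
# 3 + 3 * 2 * 6 = 39 channels along the last dimension.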


def get_embedding_function(
    num_encoding_functions=6, include_input=True, log_sampling=True
):
    r"""Return a lambda that applies positional_encoding with the given settings."""
    return lambda x: positional_encoding(
        x, num_encoding_functions, include_input, log_sampling
    )
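

# Minimal usage sketch (an assumption for illustration, not part of the
# original file): encode a small batch of 3D points and check the shape.
if __name__ == "__main__":
    points = torch.rand(4, 3)  # e.g. four 3D sample points
    encode = get_embedding_function(num_encoding_functions=6, include_input=True)
    encoded = encode(points)
    # 3 input channels + 3 channels * 2 functions (sin, cos) * 6 bands = 39
    print(encoded.shape)  # torch.Size([4, 39])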