Embedding.lua

require 'torch'
require 'nn'

-- Embedding: maps each integer token index in the input to a learned row of
-- the (inputSize x outputSize) weight matrix.
local Embedding, parent = torch.class('Embedding', 'nn.Module')

function Embedding:__init(inputSize, outputSize)
  parent.__init(self)
  self.outputSize = outputSize
  self.weight = torch.Tensor(inputSize, outputSize)
  self.gradWeight = torch.Tensor(inputSize, outputSize)
  -- note: the weights are left uninitialized here; callers are expected to
  -- initialize them (e.g. with :normal() or :uniform()) before training
end

function Embedding:updateOutput(input)
  -- input is a 1D tensor of token indices; the output is an
  -- (input:size(1) x outputSize) matrix of the corresponding embedding rows
  self.output:resize(input:size(1), self.outputSize)
  for i = 1, input:size(1) do
    self.output[i]:copy(self.weight[input[i]])
  end
  return self.output
end

function Embedding:updateGradInput(input, gradOutput)
  -- the input holds discrete indices, so there is no meaningful gradient with
  -- respect to it; return a zeroed tensor of the right shape
  if self.gradInput then
    self.gradInput:resize(input:size()):zero()
    return self.gradInput
  end
end

function Embedding:accGradParameters(input, gradOutput, scale)
  scale = scale or 1
  if scale == 0 then
    self.gradWeight:zero()
  end
  -- accumulate the scaled output gradient into the row used by each token
  for i = 1, input:size(1) do
    local word = input[i]
    self.gradWeight[word]:add(scale, gradOutput[i])
  end
end

-- we do not need to accumulate parameters when sharing
Embedding.sharedAccUpdateGradParameters = Embedding.accUpdateGradParameters
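
For reference, a minimal usage sketch (not part of the original file); the vocabulary size, embedding size, and token indices below are arbitrary example values:

-- construct a lookup for 10 tokens with 4-dimensional embeddings
local emb = Embedding(10, 4)
emb.weight:normal(0, 0.1)              -- initialize weights before use

local input = torch.Tensor{1, 3, 5}    -- a sequence of three token indices
local output = emb:forward(input)      -- 3 x 4 matrix of embedding rows

-- backward accumulates gradients into rows 1, 3 and 5 of emb.gradWeight
local gradOutput = torch.randn(3, 4)
emb:backward(input, gradOutput)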