Skip to content

Commit

Permalink
Position weight calculation in numpy (songyouwei#41)
Browse files Browse the repository at this point in the history
* Indexing operation in memnet and ram moved to numpy

* Update memnet.py
  • Loading branch information
GeneZC authored and songyouwei committed Apr 27, 2019
1 parent dc62dc6 commit e4da01e
Show file tree
Hide file tree
Showing 2 changed files with 33 additions and 16 deletions.
12 changes: 10 additions & 2 deletions models/memnet.py
Original file line number Diff line number Diff line change
Expand Up @@ -14,9 +14,17 @@ class MemNet(nn.Module):

def locationed_memory(self, memory, memory_len):
    """Apply position weighting to the external memory (Model 2 manner).

    The idx-th token (0-based) of a sentence with true length L is scaled
    by 1 - (idx + 1) / L; padding positions (idx >= L) are scaled by 1,
    i.e. left untouched.

    Args:
        memory: float tensor of shape (batch, seq_len, embed_dim).
        memory_len: int tensor of shape (batch,), true length per sample.

    Returns:
        Position-weighted tensor with the same shape and dtype as ``memory``.
    """
    seq_len = memory.shape[1]
    # Fully vectorized: 1-based positions broadcast against per-sample
    # lengths, replacing the per-token Python double loop with one
    # C-speed broadcast (the commit intended a vectorized computation).
    positions = torch.arange(
        1, seq_len + 1, dtype=memory.dtype, device=memory.device
    ).unsqueeze(0)                                           # (1, seq_len)
    lengths = memory_len.to(memory.device).to(memory.dtype).unsqueeze(1)  # (batch, 1)
    weight = 1.0 - positions / lengths
    # Padding positions keep weight 1 (the loop version appended literal 1s).
    weight = torch.where(positions > lengths, torch.ones_like(weight), weight)
    return weight.unsqueeze(2) * memory

def __init__(self, embedding_matrix, opt):
Expand Down
37 changes: 23 additions & 14 deletions models/ram.py
Original file line number Diff line number Diff line change
Expand Up @@ -10,20 +10,29 @@

class RAM(nn.Module):
def locationed_memory(self, memory, memory_len, left_len, aspect_len):
    """Weight memory positions by distance to the aspect and append the
    offset as an extra feature channel (RAM-style location encoding).

    Weights: a token left of the aspect gets 1 - (distance to aspect
    start)/L, aspect tokens get 1, a token right of the aspect gets
    1 - (distance past aspect end)/L, and padding keeps weight 1. The
    offset u is negative left of the aspect, 0 on the aspect and on
    padding, positive to the right.

    Args:
        memory: float tensor of shape (batch, seq_len, embed_dim).
        memory_len: int tensor (batch,), true sentence length.
        left_len: int tensor (batch,), number of tokens before the aspect.
        aspect_len: int tensor (batch,), aspect term length.

    Returns:
        Tensor of shape (batch, seq_len, embed_dim + 1): the weighted
        memory with the offset feature concatenated on the last dim.
    """
    batch_size = memory.shape[0]
    seq_len = memory.shape[1]
    memory_len = memory_len.cpu().numpy()
    left_len = left_len.cpu().numpy()
    aspect_len = aspect_len.cpu().numpy()
    weight = [[] for _ in range(batch_size)]
    u = [[] for _ in range(batch_size)]
    for i in range(batch_size):
        # left context: weight decays with distance to the aspect start
        for idx in range(left_len[i]):
            weight[i].append(1 - (left_len[i] - idx) / memory_len[i])
            u[i].append(idx - left_len[i])
        # aspect span: full weight, zero offset
        for idx in range(left_len[i], left_len[i] + aspect_len[i]):
            weight[i].append(1)
            u[i].append(0)
        # right context: weight decays with distance past the aspect end
        for idx in range(left_len[i] + aspect_len[i], memory_len[i]):
            weight[i].append(1 - (idx - left_len[i] - aspect_len[i] + 1) / memory_len[i])
            u[i].append(idx - left_len[i] - aspect_len[i] + 1)
        # padding: neutral weight, zero offset
        for idx in range(memory_len[i], seq_len):
            weight[i].append(1)
            u[i].append(0)
    # BUG FIX: `u` holds only ints, so a bare torch.tensor(u) produces an
    # int64 tensor and torch.cat with the float `memory` fails with a dtype
    # mismatch — cast both auxiliary tensors to memory's dtype explicitly.
    u = torch.tensor(u, dtype=memory.dtype).to(memory.device).unsqueeze(2)
    weight = torch.tensor(weight, dtype=memory.dtype).to(memory.device).unsqueeze(2)
    memory = torch.cat([memory * weight, u], dim=2)
    return memory

def __init__(self, embedding_matrix, opt):
Expand Down

0 comments on commit e4da01e

Please sign in to comment.