Commit

Some more parallelization in the forward() method (not really tested)
Roy Schwartz committed Oct 10, 2017
1 parent 9f63b1e commit 02024f0
Showing 2 changed files with 12 additions and 10 deletions.

data.py (4 changes: 2 additions & 2 deletions)

@@ -31,8 +31,8 @@ def read_embeddings(filename):
         word = e[0]
 
         # TODO: this is for debugging only
-        # if len(vocab) == 5000:
-        # break
+        if len(vocab) == 5000:
+            break
 
         good = 1
         for i in list(word):
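
The uncommented lines re-enable a 5,000-word cap on the vocabulary while reading embeddings, which shortens debugging runs. For context, a minimal sketch of how such a cap sits inside an embedding reader; the surrounding body of read_embeddings is not shown in this diff, so the file format and variable handling below are assumptions:

def read_embeddings_sketch(filename, max_vocab=5000):
    """Read word2vec-style text embeddings, stopping at max_vocab words."""
    vocab = {}
    vectors = []
    with open(filename, encoding="utf-8") as f:
        for line in f:
            e = line.rstrip().split(" ")
            word = e[0]
            # Debug-only cap, mirroring the lines uncommented above.
            if len(vocab) == max_vocab:
                break
            vocab[word] = len(vocab)
            vectors.append([float(x) for x in e[1:]])
    return vocab, vectors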

soft_patterns.py (18 changes: 10 additions & 8 deletions)

@@ -7,7 +7,7 @@
 import numpy as np
 import os
 import torch
-from torch import FloatTensor, LongTensor, cat, dot, log, mm, mul, norm, randn, zeros
+from torch import FloatTensor, LongTensor, cat, dot, log, mm, mul, norm, randn, zeros, ones
 from torch.autograd import Variable
 from torch.functional import stack
 from torch.nn import Module, Parameter
@@ -239,8 +239,10 @@ def forward(self, doc):
         doc -- a sequence of indices that correspond to the word embedding matrix
         """
         scores = Variable(zeros(self.num_patterns))
-        hiddens = [ self.start.clone() for _ in range(self.num_patterns)]
-        z1 = Variable(zeros(1, 1))
+        #hiddens = [ self.start.clone() for _ in range(self.num_patterns)]
+        hiddens = fixed_var(zeros(self.num_patterns, self.pattern_length))
+        hiddens[:,0] = 1
+        z1 = Variable(ones(1, 1))
         # z2 = Variable(zeros(1, 2))
         for word_index in doc:
             x = self.embeddings[word_index].view(self.embeddings.size()[1], 1)
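
fixed_var is not defined in this hunk; presumably it is a small helper elsewhere in soft_patterns.py. A common definition in the 2017 Variable API, offered here as an assumption rather than the repository's actual code:

from torch.autograd import Variable

def fixed_var(tensor):
    # Wrap a tensor as a constant: a Variable that never receives gradients.
    return Variable(tensor, requires_grad=False)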
@@ -256,11 +258,11 @@
                 one_forward_result = result[:,start:end]
                 # print("ofr:", one_forward_result.size(), "h:",hiddens[p].size())
                 # mul_res = mul(hiddens[p][:, :-1], one_forward_result)
-                hiddens[p] = self.start + \
-                    cat((z1, mul(hiddens[p][:, :-1], one_forward_result)), 1)
-                # cat((z2, mul(hidden[:, :-2], two_forward_result)), 1)
-                scores[p] = scores[p] + hiddens[p][0, -1]  # mm(hidden, self.final) # TODO: change if learning final state
-        return self.mlp.forward(stack([s for s in scores]).t())
+                hiddens[p, :] = cat((z1, mul(hiddens[p, :-1].clone(), one_forward_result)), 1)
+
+            scores = scores + hiddens[:, -1]  # mm(hidden, self.final) # TODO: change if learning final state
+
+        return self.mlp.forward(stack(scores).t())
 
         # scores = stack([p.forward(doc) for p in self.patterns])
         # return self.mlp.forward(scores.t())
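
Taken together, the soft_patterns.py changes replace the per-pattern Python list of hidden-state vectors with a single num_patterns x pattern_length matrix (column 0 pinned to 1 as the always-active start state, hence z1 switching from zeros to ones), and pull the score accumulation out of the per-pattern loop into one vectorized add. A self-contained sketch of that recurrence, written with current PyTorch idioms rather than the 2017 Variable API; the names forward_scores and step_scores are invented for illustration:

import torch

def forward_scores(transition_scores, num_patterns, pattern_length):
    # transition_scores: a list with one (num_patterns, pattern_length - 1)
    # tensor per word, playing the role of one_forward_result above.
    hiddens = torch.zeros(num_patterns, pattern_length)
    hiddens[:, 0] = 1  # every pattern begins in its start state
    ones_col = torch.ones(num_patterns, 1)  # re-injects the start state (z1)
    scores = torch.zeros(num_patterns)
    for step_scores in transition_scores:
        # Shift each pattern's state vector right by one and scale by this
        # word's transition scores -- one batched op covering all patterns.
        hiddens = torch.cat((ones_col, hiddens[:, :-1] * step_scores), dim=1)
        # Accumulate every pattern's end-state score in a single add,
        # rather than updating scores[p] pattern by pattern.
        scores = scores + hiddens[:, -1]
    return scores

# Toy usage: 3 patterns of length 4 over a 5-word "document".
words = [torch.rand(3, 3) for _ in range(5)]
print(forward_scores(words, 3, 4))  # one score per pattern

The sketch keeps the accumulation inside the per-word loop, preserving the old semantics where a pattern match may end at any position; whether the committed code does the same depends on indentation the diff does not fully resolve, which fits the "not really tested" caveat in the commit message.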
