
Commit

Some minor changes
Roy Schwartz committed Oct 10, 2017
1 parent 9040244 commit 9f63b1e
Showing 2 changed files with 11 additions and 8 deletions.
data.py: 4 changes (2 additions, 2 deletions)
@@ -31,8 +31,8 @@ def read_embeddings(filename):
         word = e[0]

         # TODO: this is for debugging only
-        if len(vocab) == 5000:
-            break
+        # if len(vocab) == 5000:
+        # break

         good = 1
         for i in list(word):
soft_patterns.py: 15 changes (9 additions, 6 deletions)
@@ -238,7 +238,7 @@ def forward(self, doc):
         Calculate score for one document.
         doc -- a sequence of indices that correspond to the word embedding matrix
         """
-        scores = [Variable(zeros(1)) for _ in range(self.num_patterns)]
+        scores = Variable(zeros(self.num_patterns))
         hiddens = [ self.start.clone() for _ in range(self.num_patterns)]
         z1 = Variable(zeros(1, 1))
         # z2 = Variable(zeros(1, 2))
@@ -252,14 +252,15 @@ def forward(self, doc):
             #  # hidden[:, :-1].size(), one_forward_result.size(),
             # "mul", mul(hidden[:, :-1], one_forward_result).size())
             for p in range(self.num_patterns):
-                one_forward_result = self.get_subset(result, p,1)
+                start, end = self.get_subset(p,1)
+                one_forward_result = result[:,start:end]
                 # print("ofr:", one_forward_result.size(), "h:",hiddens[p].size())
                 # mul_res = mul(hiddens[p][:, :-1], one_forward_result)
                 hiddens[p] = self.start + \
                     cat((z1, mul(hiddens[p][:, :-1], one_forward_result)), 1)
                 # cat((z2, mul(hidden[:, :-2], two_forward_result)), 1)
-                scores[p][0] = scores[p][0] + hiddens[p][0, -1] # mm(hidden, self.final) # TODO: change if learning final state
-        return self.mlp.forward(stack([s[0] for s in scores]).t())
+                scores[p] = scores[p] + hiddens[p][0, -1] # mm(hidden, self.final) # TODO: change if learning final state
+        return self.mlp.forward(stack([s for s in scores]).t())

         # scores = stack([p.forward(doc) for p in self.patterns])
         # return self.mlp.forward(scores.t())
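
Note (not part of the commit): the forward() change above replaces a Python list of one-element score Variables with a single score vector, so each pattern accumulates into one slot and no per-element unpacking is needed before the MLP. A minimal sketch of the two styles, using plain PyTorch tensors in place of the old Variable wrapper and arbitrary stand-in values for hiddens[p][0, -1]:

    import torch

    num_patterns = 3  # arbitrary example size

    # old style: one 1-element tensor per pattern, stacked at the end
    scores_list = [torch.zeros(1) for _ in range(num_patterns)]
    for p in range(num_patterns):
        scores_list[p][0] = scores_list[p][0] + float(p)    # stand-in for hiddens[p][0, -1]
    stacked_old = torch.stack([s[0] for s in scores_list])  # shape: (num_patterns,)

    # new style: a single vector, indexed per pattern
    scores_vec = torch.zeros(num_patterns)
    for p in range(num_patterns):
        scores_vec[p] = scores_vec[p] + float(p)

    assert torch.equal(stacked_old, scores_vec)  # both give the same (num_patterns,) vector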
@@ -269,11 +270,11 @@ def transition_matrix(self, word_vec):

         return result

-    def get_subset(self, result, pattern_index, diag_index):
+    def get_subset(self, pattern_index, diag_index):
         large_n = self.num_diags * self.pattern_length
         start = (pattern_index - 1) * large_n + diag_index*self.pattern_length
         end = (pattern_index - 1) * large_n + (diag_index+1)*self.pattern_length - diag_index
-        return result[:, start:end]
+        return start, end

     def predict(self, doc):
         output = self.forward(doc).data
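
Note (not part of the commit): get_subset() now returns column indices instead of slicing the transition matrix itself; the caller in forward() then takes result[:, start:end]. A small self-contained sketch of the index arithmetic, with made-up sizes (num_diags, pattern_length and the chosen indices are illustrative assumptions, not values from the repo):

    num_diags, pattern_length = 2, 4   # assumed sizes for illustration

    def get_subset(pattern_index, diag_index):
        # same arithmetic as the method above, minus self
        large_n = num_diags * pattern_length
        start = (pattern_index - 1) * large_n + diag_index * pattern_length
        end = (pattern_index - 1) * large_n + (diag_index + 1) * pattern_length - diag_index
        return start, end

    start, end = get_subset(2, 1)      # pattern 2, first off-diagonal
    print(start, end, end - start)     # 12 15 3: a block of pattern_length - diag_index columns

In forward(), result[:, start:end] then picks that block out of the per-word transition matrix.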
@@ -328,6 +329,8 @@ def train(train_data,
print(".", end="", flush=True)
loss += train_one_doc(model, doc, gold, optimizer)

print("\n")

train_acc = evaluate_accuracy(model, train_data)
dev_acc = evaluate_accuracy(model, dev_data)
# "param_norm:", math.sqrt(sum(p.data.norm() ** 2 for p in all_params)),

0 comments on commit 9f63b1e
