Skip to content

Commit

Permalink
option to print language model words and their log probs during evaluation
Browse files Browse the repository at this point in the history
  • Loading branch information
alexeib authored and myleott committed Jul 25, 2018
1 parent e7b494f commit dbe9637
Show file tree
Hide file tree
Showing 2 changed files with 18 additions and 0 deletions.
16 changes: 16 additions & 0 deletions eval_lm.py
Original file line number Diff line number Diff line change
Expand Up @@ -60,8 +60,10 @@ def main(args):
if args.remove_bpe is not None:
bpe_cont = args.remove_bpe.rstrip()
bpe_toks = set(i for i in range(len(task.dictionary)) if task.dictionary[i].endswith(bpe_cont))
bpe_len = len(bpe_cont)
else:
bpe_toks = None
bpe_len = 0

with progress_bar.build_progress_bar(args, itr) as t:
results = scorer.score_batched_itr(t, cuda=use_cuda, timer=gen_timer)
Expand All @@ -85,6 +87,20 @@ def main(args):
pos_scores = pos_scores[(~inf_scores).nonzero()]
score_sum += pos_scores.sum()
count += pos_scores.numel() - skipped_toks

if args.output_word_probs:
    # Reconstruct full words from (possibly BPE-split) tokens and print
    # each word with the log-probability the model assigned to it.
    w = ''
    word_prob = []
    for i, tok in enumerate(hypo['tokens']):
        w_ind = tok.item()
        w += task.dictionary[w_ind]
        if bpe_toks is not None and w_ind in bpe_toks:
            # BPE continuation piece: strip the continuation marker and
            # keep accumulating characters into the current word.
            w = w[:-bpe_len]
        else:
            # Word boundary reached: record the completed word.
            # NOTE(review): only the final piece's log-prob is recorded;
            # if a per-word score should sum over all pieces, confirm upstream.
            word_prob.append((w, pos_scores[i].item()))
            w = ''
    # BUG FIX: '{:2f}' is min-width 2 with the default 6-digit precision;
    # '{:.2f}' gives the intended two decimal places.
    print('\t'.join('{} [{:.2f}]'.format(x[0], x[1]) for x in word_prob))

wps_meter.update(src_tokens.size(0))
t.log({'wps': round(wps_meter.avg)})

Expand Down
2 changes: 2 additions & 0 deletions fairseq/options.py
Original file line number Diff line number Diff line change
Expand Up @@ -249,6 +249,8 @@ def add_common_eval_args(group):
def add_eval_lm_args(parser):
    """Register language-model evaluation options on *parser*.

    Adds the shared evaluation flags plus the LM-specific
    ``--output-word-probs`` switch, grouped under 'LM Evaluation'.
    """
    lm_group = parser.add_argument_group('LM Evaluation')
    add_common_eval_args(lm_group)
    lm_group.add_argument(
        '--output-word-probs',
        action='store_true',
        help='if set, outputs words and their predicted log probabilities to standard output',
    )


def add_generation_args(parser):
Expand Down

0 comments on commit dbe9637

Please sign in to comment.