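"""translate.py

Beam-search translation with a trained OpenNMT model: reads one
tokenized sentence per line from -src, writes the best hypothesis for
each sentence to -output, and optionally scores gold targets from -tgt.

Example invocation (file names are illustrative):
    python translate.py -model model.pt -src src-test.txt \
        -output pred.txt -verbose -gpu 0
"""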
import onmt
import torch
import argparse
import itertools  # used to append a sentinel after the source file (see main)
import math


parser = argparse.ArgumentParser(description='translate.py')
parser.add_argument('-model', required=True,
                    help='Path to model .pt file')
parser.add_argument('-src', required=True,
                    help='Source sequences to decode (one line per sequence)')
parser.add_argument('-tgt',
                    help='True target sequences (optional)')
parser.add_argument('-output', default='pred.txt',
                    help="""Path to output the predictions (each line will
                    be the decoded sequence)""")
parser.add_argument('-beam_size', type=int, default=5,
                    help='Beam size')
parser.add_argument('-batch_size', type=int, default=30,
                    help='Batch size')
parser.add_argument('-max_sent_length', type=int, default=100,
                    help='Maximum sentence length.')
parser.add_argument('-replace_unk', action="store_true",
                    help="""Replace the generated UNK tokens with the source
                    token that had the highest attention weight. If phrase_table
                    is provided, it will look up the identified source token and
                    give the corresponding target token. If it is not provided
                    (or the identified source token does not exist in the
                    table), then it will copy the source token.""")
# parser.add_argument('-phrase_table',
#                     help="""Path to source-target dictionary to replace UNK
#                     tokens. See README.md for the format of this file.""")
parser.add_argument('-verbose', action="store_true",
                    help='Print scores and predictions for each sentence')
parser.add_argument('-n_best', type=int, default=1,
                    help="""If verbose is set, will output the n_best
                    decoded sentences""")
parser.add_argument('-gpu', type=int, default=-1,
                    help="Device to run on")


def reportScore(name, scoreTotal, wordsTotal):
    # Report the average model score and the corresponding perplexity.
    print("%s AVG SCORE: %.4f, %s PPL: %.4f" % (
        name, scoreTotal / wordsTotal,
        name, math.exp(-scoreTotal / wordsTotal)))


def main():
    opt = parser.parse_args()
    opt.cuda = opt.gpu > -1
    if opt.cuda:
        torch.cuda.set_device(opt.gpu)

    translator = onmt.Translator(opt)

    outF = open(opt.output, 'w')

    predScoreTotal, predWordsTotal, goldScoreTotal, goldWordsTotal = 0, 0, 0, 0

    srcBatch, tgtBatch = [], []

    count = 0

    tgtF = open(opt.tgt) if opt.tgt else None

    # Append a None sentinel after the source file so the final partial
    # batch (fewer than batch_size sentences) still gets translated.
    for line in itertools.chain(open(opt.src), [None]):
        if line is not None:
            srcBatch += [line.split()]
            if tgtF:
                tgtBatch += [tgtF.readline().split()]

            if len(srcBatch) < opt.batch_size:
                continue
        elif len(srcBatch) == 0:
            break

        predBatch, predScore, goldScore = translator.translate(srcBatch,
                                                               tgtBatch)

        predScoreTotal += sum(score[0] for score in predScore)
        predWordsTotal += sum(len(x[0]) for x in predBatch)
        if tgtF is not None:
            goldScoreTotal += sum(goldScore)
            goldWordsTotal += sum(len(x) for x in tgtBatch)

        for b in range(len(predBatch)):
            count += 1
            # The first hypothesis in each beam is the best-scoring one.
            outF.write(" ".join(predBatch[b][0]) + '\n')

            if opt.verbose:
                srcSent = ' '.join(srcBatch[b])
                if translator.tgt_dict.lower:
                    srcSent = srcSent.lower()
                print('SENT %d: %s' % (count, srcSent))
                print('PRED %d: %s' % (count, " ".join(predBatch[b][0])))
                print("PRED SCORE: %.4f" % predScore[b][0])

                if tgtF is not None:
                    tgtSent = ' '.join(tgtBatch[b])
                    if translator.tgt_dict.lower:
                        tgtSent = tgtSent.lower()
                    print('GOLD %d: %s' % (count, tgtSent))
                    print("GOLD SCORE: %.4f" % goldScore[b])

                if opt.n_best > 1:
                    print('\nBEST HYP:')
                    for n in range(opt.n_best):
                        print("[%.4f] %s" % (predScore[b][n],
                                             " ".join(predBatch[b][n])))

                print('')

        srcBatch, tgtBatch = [], []

    reportScore('PRED', predScoreTotal, predWordsTotal)
    if tgtF:
        reportScore('GOLD', goldScoreTotal, goldWordsTotal)
        tgtF.close()

    outF.close()


if __name__ == "__main__":
    main()