forked from baidu/DuReader
add scripts to convert MARCO V2 data from its format to DuReader format
liujing46 committed May 28, 2018
1 parent 2e506e3 · commit c57f023
Showing 5 changed files with 129 additions and 0 deletions.
marco_tokenize_data.py (new file, +43 lines):
#coding=utf8

import sys
import json

import nltk


def _nltk_tokenize(sequence):
    # Tokenize with NLTK and record, for each token, its inclusive
    # [start, end] character offsets in the original string.
    tokens = nltk.word_tokenize(sequence)

    cur_char_offset = 0
    token_offsets = []
    token_words = []
    for token in tokens:
        cur_char_offset = sequence.find(token, cur_char_offset)
        token_offsets.append([cur_char_offset, cur_char_offset + len(token) - 1])
        token_words.append(token)
    return token_offsets, token_words


def segment(input_js):
    # Add the segmented_* fields DuReader expects, modifying the sample in place.
    _, input_js['segmented_question'] = _nltk_tokenize(input_js['question'])
    for doc in input_js['documents']:
        # Titles are empty in data converted from MARCO, so there is
        # nothing to tokenize here.
        doc['segmented_title'] = []
        doc['segmented_paragraphs'] = []
        for para in doc['paragraphs']:
            _, seg_para = _nltk_tokenize(para)
            doc['segmented_paragraphs'].append(seg_para)
    if 'answers' in input_js:
        input_js['segmented_answers'] = []
        for answer in input_js['answers']:
            _, seg_answer = _nltk_tokenize(answer)
            input_js['segmented_answers'].append(seg_answer)


if __name__ == '__main__':
    if len(sys.argv) != 2:
        print('Usage: marco_tokenize_data.py <input_path>')
        exit()

    # Make sure the NLTK tokenizer models are available.
    nltk.download('punkt')

    for line in open(sys.argv[1]):
        dureader_js = json.loads(line.strip())
        segment(dureader_js)
        print(json.dumps(dureader_js))
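For a feel of what _nltk_tokenize returns, here is a hypothetical REPL session (assuming the punkt models are installed and the function above is in scope); each offset is an inclusive [start, end] character span into the original string:

    >>> _nltk_tokenize("Who wrote DuReader?")
    ([[0, 2], [4, 8], [10, 17], [18, 18]], ['Who', 'wrote', 'DuReader', '?'])

Note that segment() keeps only the token lists and discards the offsets.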
marcov1_to_dureader.py (new file, +37 lines):
#coding=utf8

import sys
import json

import pandas as pd


def trans(input_js):
    # Map one MARCO V1 record onto the DuReader JSON schema.
    output_js = {}
    output_js['question'] = input_js['query']
    output_js['question_type'] = input_js['query_type']
    output_js['question_id'] = input_js['query_id']
    # MARCO has no counterpart for this DuReader field.
    output_js['fact_or_opinion'] = ""
    output_js['documents'] = []
    for para in input_js['passages']:
        # Each MARCO passage becomes a single-paragraph DuReader document.
        doc = {}
        doc['title'] = ""
        if 'is_selected' in para:
            doc['is_selected'] = para['is_selected'] != 0
        doc['paragraphs'] = [para['passage_text']]
        output_js['documents'].append(doc)

    if 'answers' in input_js:
        output_js['answers'] = input_js['answers']
    return output_js


if __name__ == '__main__':
    if len(sys.argv) != 2:
        print('Usage: marcov1_to_dureader.py <input_path>')
        exit()

    # The input is one JSON object per line (jsonl), so lines=True is needed.
    df = pd.read_json(sys.argv[1], lines=True)
    for _, row in df.iterrows():
        marco_js = json.loads(row.to_json())
        dureader_js = trans(marco_js)
        print(json.dumps(dureader_js))
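To make the field mapping concrete, here is a minimal sketch with a hypothetical single-passage record (the values are invented; only the key names follow the MARCO V1 and DuReader schemas, and trans and json from the script above are assumed to be in scope):

    # hypothetical MARCO V1 record (values made up for illustration)
    marco_js = {
        "query": "what is dureader",
        "query_type": "description",
        "query_id": 9,
        "passages": [{"is_selected": 1,
                      "passage_text": "DuReader is a machine reading dataset."}],
        "answers": ["DuReader is a machine reading dataset."],
    }
    print(json.dumps(trans(marco_js)))
    # prints something like:
    # {"question": "what is dureader", "question_type": "description",
    #  "question_id": 9, "fact_or_opinion": "", "documents": [{"title": "",
    #  "is_selected": true, "paragraphs": ["DuReader is a machine reading dataset."]}],
    #  "answers": ["DuReader is a machine reading dataset."]}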
marcov2_to_v1_tojsonl.py (new file, +14 lines):
import sys

import pandas as pd


if __name__ == '__main__':
    if len(sys.argv) != 3:
        print('Usage: marcov2_to_v1_tojsonl.py <input_path> <output_path>')
        exit()
    infile = sys.argv[1]
    outfile = sys.argv[2]
    # MARCO V2 ships as a single column-oriented JSON object; load it into
    # a DataFrame and write it back out one JSON object per line (V1 style).
    df = pd.read_json(infile)
    with open(outfile, 'w') as f:
        for _, row in df.iterrows():
            f.write(row.to_json() + '\n')
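The reason a bare pd.read_json works on the V2 file is that V2 is distributed as one column-oriented JSON object rather than one object per line; a toy sketch of the reshaping, using miniature made-up data:

    import io

    import pandas as pd

    # column-oriented JSON, shaped like the V2 distribution (toy data)
    v2 = '{"query": {"0": "q0", "1": "q1"}, "query_id": {"0": 0, "1": 1}}'
    df = pd.read_json(io.StringIO(v2))
    for _, row in df.iterrows():
        print(row.to_json())
    # {"query":"q0","query_id":0}
    # {"query":"q1","query_id":1}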
The pipeline driver shell script (new file, +22 lines):
#!/bin/bash

input_file=$1
output_file=$2

# Convert the data from MARCO V2 (json) format to MARCO V1 (jsonl) format.
# The conversion script was forked from the MARCO repo; the V1 format is
# much easier to work with.
python3 marcov2_to_v1_tojsonl.py "$input_file" "$input_file.marcov1"

# Convert the data from MARCO V1 format to DuReader format.
python3 marcov1_to_dureader.py "$input_file.marcov1" > "$input_file.dureader_raw"

# Tokenize the data.
python3 marco_tokenize_data.py "$input_file.dureader_raw" > "$input_file.segmented"

# Find fake answers (the start and end positions of the answers in the
# documents) for the train and dev sets, using DuReader's existing
# preprocess.py. Note that this must not be applied to the test set,
# since the test set has no ground truth.
python preprocess.py "$input_file.segmented" > "$output_file"

# Remove the temporary data files.
rm -f "$input_file.marcov1" "$input_file.dureader_raw" "$input_file.segmented"
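A typical end-to-end run might then look like this (hypothetical file names: assume the driver is saved as run_marco.sh beside the three Python scripts above, and train_v2.1.json is the downloaded MARCO V2 train split):

    bash run_marco.sh train_v2.1.json marco_train.preprocessed.json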