Commit

add scripts to convert MARCO V2 data from its format to DuReader format
liujing46 committed May 28, 2018
1 parent 2e506e3 commit c57f023
Showing 5 changed files with 129 additions and 0 deletions.
13 changes: 13 additions & 0 deletions README.md
@@ -128,6 +128,19 @@ python run.py --predict --algo BIDAF --test_files ../data/demo/devset/search.dev

By default, the results are saved at `../data/results/` folder. You can change this by specifying `--result_dir DIR_PATH`.

## Run baseline systems on multilingual datasets

To help evaluate system performance on multilingual datasets, we provide scripts that convert MS MARCO V2 data from its native format to the DuReader format.

[MS MARCO](http://www.msmarco.org/dataset.aspx) (Microsoft Machine Reading Comprehension) is an English dataset focused on machine reading comprehension and question answering. Its design is similar to that of DuReader, so it is worthwhile to examine MRC systems on both the Chinese (DuReader) and English (MS MARCO) datasets.

You can download the MS MARCO V2 data and run the following script to convert it from MS MARCO V2 format to DuReader format. You can then run and evaluate our DuReader baselines, or your own DuReader systems, on the MS MARCO data (a usage sketch follows the conversion commands below).

```
run_marco2dureader_preprocess.sh ../data/marco/train_v2.1.json ../data/marco/train_v2.1_dureaderformat.json
run_marco2dureader_preprocess.sh ../data/marco/dev_v2.1.json ../data/marco/dev_v2.1_dureaderformat.json
```
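
Once converted, these files can be used anywhere the DuReader baselines expect DuReader-format data. For example, following the `run.py` prediction command shown earlier in this README (the paths below are illustrative and assume the converted dev file is used as the test set):

```
python run.py --predict --algo BIDAF --test_files ../data/marco/dev_v2.1_dureaderformat.json
```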

## Copyright and License
Copyright 2017 Baidu.com, Inc. All Rights Reserved

43 changes: 43 additions & 0 deletions utils/marco_tokenize_data.py
@@ -0,0 +1,43 @@
#coding=utf8

import os, sys, json
import nltk

def _nltk_tokenize(sequence):
    # Tokenize with NLTK and record each token's inclusive character offsets.
    tokens = nltk.word_tokenize(sequence)

    cur_char_offset = 0
    token_offsets = []
    token_words = []
    for token in tokens:
        cur_char_offset = sequence.find(token, cur_char_offset)
        token_offsets.append([cur_char_offset, cur_char_offset + len(token) - 1])
        token_words.append(token)
    return token_offsets, token_words

def segment(input_js):
    # Add segmented (tokenized) fields to a DuReader-format sample in place.
    _, input_js['segmented_question'] = _nltk_tokenize(input_js['question'])
    for doc_id, doc in enumerate(input_js['documents']):
        doc['segmented_title'] = []
        doc['segmented_paragraphs'] = []
        for para_id, para in enumerate(doc['paragraphs']):
            _, seg_para = _nltk_tokenize(para)
            doc['segmented_paragraphs'].append(seg_para)
    if 'answers' in input_js:
        input_js['segmented_answers'] = []
        for answer_id, answer in enumerate(input_js['answers']):
            _, seg_answer = _nltk_tokenize(answer)
            input_js['segmented_answers'].append(seg_answer)


if __name__ == '__main__':
    if len(sys.argv) != 2:
        print('Usage: marco_tokenize_data.py <input_path>')
        exit()

    nltk.download('punkt')

    for line in open(sys.argv[1]):
        dureader_js = json.loads(line.strip())
        segment(dureader_js)
        print(json.dumps(dureader_js))
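
For reference, a minimal sketch of what `_nltk_tokenize` returns: a list of inclusive character offsets plus the token list (only the token lists are kept by `segment`). The sentence and import below are illustrative, and the NLTK `punkt` models must already be available:

```python
import nltk
nltk.download('punkt')  # tokenizer models used by nltk.word_tokenize

from marco_tokenize_data import _nltk_tokenize  # assumes this module is on the path

offsets, words = _nltk_tokenize("What is machine reading comprehension?")
print(words)    # ['What', 'is', 'machine', 'reading', 'comprehension', '?']
print(offsets)  # [[0, 3], [5, 6], [8, 14], [16, 22], [24, 36], [37, 37]]
```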
37 changes: 37 additions & 0 deletions utils/marcov1_to_dureader.py
@@ -0,0 +1,37 @@
#coding=utf8

import sys
import json
import pandas as pd


def trans(input_js):
    # Map one MS MARCO V1 record onto the DuReader schema.
    output_js = {}
    output_js['question'] = input_js['query']
    output_js['question_type'] = input_js['query_type']
    output_js['question_id'] = input_js['query_id']
    output_js['fact_or_opinion'] = ""
    output_js['documents'] = []
    for para_id, para in enumerate(input_js['passages']):
        # Each MARCO passage becomes a single-paragraph DuReader document.
        doc = {}
        doc['title'] = ""
        if 'is_selected' in para:
            doc['is_selected'] = True if para['is_selected'] != 0 else False
        doc['paragraphs'] = [para['passage_text']]
        output_js['documents'].append(doc)

    if 'answers' in input_js:
        output_js['answers'] = input_js['answers']
    return output_js


if __name__ == '__main__':
    if len(sys.argv) != 2:
        print('Usage: marcov1_to_dureader.py <input_path>')
        exit()

    # The MARCO V1 file is JSON Lines (one record per line), so read it with lines=True.
    df = pd.read_json(sys.argv[1], lines=True)
    for row in df.iterrows():
        marco_js = json.loads(row[1].to_json())
        dureader_js = trans(marco_js)
        print(json.dumps(dureader_js))
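
As a minimal sketch of the field mapping that `trans` performs, here is a hypothetical single-passage MARCO record and its DuReader-format counterpart (all field values are made up for illustration):

```python
from marcov1_to_dureader import trans  # assumes this module is on the path

marco_js = {
    'query': 'what is dureader',
    'query_type': 'description',
    'query_id': 42,
    'passages': [{'is_selected': 1, 'passage_text': 'DuReader is a Chinese MRC dataset.'}],
    'answers': ['DuReader is a Chinese MRC dataset.'],
}

print(trans(marco_js))
# {'question': 'what is dureader', 'question_type': 'description', 'question_id': 42,
#  'fact_or_opinion': '', 'documents': [{'title': '', 'is_selected': True,
#  'paragraphs': ['DuReader is a Chinese MRC dataset.']}],
#  'answers': ['DuReader is a Chinese MRC dataset.']}
```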
14 changes: 14 additions & 0 deletions utils/marcov2_to_v1_tojsonl.py
@@ -0,0 +1,14 @@
import sys
import json
import pandas as pd

if __name__ == '__main__':
    if len(sys.argv) != 3:
        print('Usage: marcov2_to_v1_tojsonl.py <input_path> <output_path>')
        exit()
    infile = sys.argv[1]
    outfile = sys.argv[2]
    # MARCO V2 is one column-oriented JSON file; rewrite it as one JSON object
    # per line (the MARCO V1 / JSON Lines layout).
    df = pd.read_json(infile)
    with open(outfile, 'w') as f:
        for row in df.iterrows():
            f.write(row[1].to_json() + '\n')
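
MARCO V2 ships as a single column-oriented JSON file (each field maps record indices to values), whereas MARCO V1 is one JSON object per line. The snippet below is a toy illustration of that reshaping, using a made-up miniature V2-style payload rather than the real files:

```python
from io import StringIO
import pandas as pd

# A tiny stand-in for the MARCO V2 column-oriented layout.
v2_like = '{"query": {"0": "what is dureader"}, "query_id": {"0": 42}, "query_type": {"0": "description"}}'

df = pd.read_json(StringIO(v2_like))
for row in df.iterrows():
    print(row[1].to_json())
# prints one V1-style (JSON Lines) record per row, e.g.:
# {"query":"what is dureader","query_id":42,"query_type":"description"}
```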
22 changes: 22 additions & 0 deletions utils/run_marco2dureader_preprocess.sh
@@ -0,0 +1,22 @@
#!/bin/bash

input_file=$1
output_file=$2

# Convert the data from MARCO V2 (json) format to MARCO V1 (jsonl) format.
# The conversion script was forked from the MARCO repo.
# The MARCO V1 format is much easier to work with.
python3 marcov2_to_v1_tojsonl.py "$input_file" "$input_file.marcov1"

# Convert the data from MARCO V1 format to DuReader format.
python3 marcov1_to_dureader.py "$input_file.marcov1" >"$input_file.dureader_raw"

# Tokenize the data.
python3 marco_tokenize_data.py "$input_file.dureader_raw" >"$input_file.segmented"

# Find fake answers (i.e. the start and end positions of the answers in the documents) for the train and dev sets.
# Note that this should not be applied to the test set, since the test set has no ground truth.
python preprocess.py "$input_file.segmented" >"$output_file"

# Remove the temporary data files.
rm -rf "$input_file.dureader_raw" "$input_file.segmented"
