
Commit

correct label extraction + add note on discrepancies on trained MNLI model and HANS (huggingface#6221)
VictorSanh authored Aug 3, 2020
1 parent 3c289fb commit 0513f8d
Showing 1 changed file with 6 additions and 2 deletions.
examples/adversarial/utils_hans.py (6 additions, 2 deletions)

@@ -255,7 +255,11 @@ def get_dev_examples(self, data_dir):
         return self._create_examples(self._read_tsv(os.path.join(data_dir, "heuristics_evaluation_set.txt")), "dev")
 
     def get_labels(self):
-        """See base class."""
+        """See base class.
+        Note that we follow the standard three labels for MNLI
+        (see :class:`~transformers.data.processors.utils.MnliProcessor`)
+        but the HANS evaluation groups `contradiction` and `neutral` into `non-entailment` (label 0) while
+        `entailment` is label 1."""
         return ["contradiction", "entailment", "neutral"]
 
     def _create_examples(self, lines, set_type):
@@ -268,7 +272,7 @@ def _create_examples(self, lines, set_type):
             text_a = line[5]
             text_b = line[6]
             pairID = line[7][2:] if line[7].startswith("ex") else line[7]
-            label = line[-1]
+            label = line[0]
             examples.append(InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label, pairID=pairID))
         return examples
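The docstring added above notes that HANS is scored as a binary task even though the model is trained with the three MNLI labels returned by get_labels(). Below is a minimal sketch, not part of this commit, of how three-way MNLI predictions can be collapsed to the binary HANS scheme; the label order follows get_labels() in utils_hans.py, and the helper name to_hans_label is illustrative only.

# Minimal sketch (not from this commit): collapse three-way MNLI predictions to the
# binary HANS scheme described in the docstring above. The label order matches
# get_labels() in utils_hans.py; the helper name is hypothetical.

MNLI_LABELS = ["contradiction", "entailment", "neutral"]  # prediction id -> label string


def to_hans_label(pred_id: int) -> int:
    """Return 1 for `entailment`, 0 for `non-entailment` (contradiction or neutral)."""
    return 1 if MNLI_LABELS[pred_id] == "entailment" else 0


if __name__ == "__main__":
    print(to_hans_label(1))  # entailment    -> 1
    print(to_hans_label(2))  # neutral       -> 0 (non-entailment)
    print(to_hans_label(0))  # contradiction -> 0 (non-entailment)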
