pietrolesci committed on
Commit 581e583
1 Parent(s): 4861e61

Update README.md

Files changed (1)
  1. README.md +44 -28
README.md CHANGED
@@ -8,9 +8,10 @@ where the premise differs from the hypothesis by conjuncts removed, added, or re
 
 ## Dataset curation
 No curation is performed. This dataset is "as-is". The label mapping is the usual `{"entailment": 0, "neutral": 1, "contradiction": 2}`
- used in NLI datasets.
+ used in NLI datasets. Note that labels for the `test` split are not available.
+ Also, the `train` split is originally named `adversarial_train_15k`.
 
- NOTE: labels for `test` split are not available.
+ Note that 2 instances (matching on "premise", "hypothesis", "label") are present in both `train` and `dev`.
 
 
 ## Code to create the dataset
@@ -25,31 +26,46 @@ paths = {
     "test": "<path_to_folder>/ConjNLI-master/data/NLI/conj_test.tsv",
 }
 
- if __name__ == "__main__":
-     dataset_splits = {}
-     for split, path in paths.items():
-
-         # read dataset split
-         df = pd.read_csv(paths[split], sep="\t")
-
-         # encode labels using the default mapping used by other NLI datasets,
-         # i.e., entailment: 0, neutral: 1, contradiction: 2
-         df.columns = df.columns.str.lower()
-         if "test" not in path:
-             df["label"] = df["label"].map({"entailment": 0, "neutral": 1, "contradiction": 2})
-         else:
-             df["label"] = -1
-
-         # cast to dataset
-         features = Features({
-             "premise": Value(dtype="string", id=None),
-             "hypothesis": Value(dtype="string", id=None),
-             "label": ClassLabel(num_classes=3, names=["entailment", "neutral", "contradiction"]),
-         })
-         dataset = Dataset.from_pandas(df, features=features)
-         dataset_splits[split] = dataset
-
-     conj_nli = DatasetDict(dataset_splits)
-     conj_nli.push_to_hub("pietrolesci/conj_nli", token="<token>")
+ dataset_splits = {}
+ for split, path in paths.items():
+
+     # load data
+     df = pd.read_csv(paths[split], sep="\t")
+
+     # encode labels using the default mapping used by other NLI datasets,
+     # i.e., entailment: 0, neutral: 1, contradiction: 2
+     df.columns = df.columns.str.lower()
+     if "test" not in path:
+         df["label"] = df["label"].map({"entailment": 0, "neutral": 1, "contradiction": 2})
+     else:
+         df["label"] = -1
+
+     # cast to dataset
+     features = Features({
+         "premise": Value(dtype="string", id=None),
+         "hypothesis": Value(dtype="string", id=None),
+         "label": ClassLabel(num_classes=3, names=["entailment", "neutral", "contradiction"]),
+     })
+     dataset = Dataset.from_pandas(df, features=features)
+     dataset_splits[split] = dataset
+
+ conj_nli = DatasetDict(dataset_splits)
+ conj_nli.push_to_hub("pietrolesci/conj_nli", token="<token>")
+
+
+ # check overlap between splits
+ from itertools import combinations
+ for i, j in combinations(conj_nli.keys(), 2):
+     print(
+         f"{i} - {j}: ",
+         pd.merge(
+             conj_nli[i].to_pandas(),
+             conj_nli[j].to_pandas(),
+             on=["premise", "hypothesis", "label"], how="inner"
+         ).shape[0],
+     )
+ #> train - dev: 2
+ #> train - test: 0
+ #> dev - test: 0
 
 ```
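As a usage sketch (not part of the commit, and assuming the `pietrolesci/conj_nli` repo created by the script above is accessible on the Hub), the pushed dataset can be loaded back and the `ClassLabel` ids mapped to the usual NLI names:

```python
from datasets import load_dataset

# load the dataset pushed by the script above
conj_nli = load_dataset("pietrolesci/conj_nli")

# "label" is a ClassLabel feature, so integer ids map back to the usual NLI names
label_feature = conj_nli["train"].features["label"]
print(label_feature.names)        # ['entailment', 'neutral', 'contradiction']
print(label_feature.int2str(0))   # 'entailment'

# test labels are not available and are stored as -1
print(set(conj_nli["test"]["label"]))
```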
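The two overlapping `train`/`dev` instances noted above are kept as-is in the released dataset. If one wanted to drop them from `train`, a rough sketch could look like this (split and column names follow the script above; the expected count of 2 comes from the overlap check in the README):

```python
import pandas as pd
from datasets import Dataset, load_dataset

conj_nli = load_dataset("pietrolesci/conj_nli")
train_df = conj_nli["train"].to_pandas()
dev_df = conj_nli["dev"].to_pandas()

# mark train rows whose (premise, hypothesis, label) triple also appears in dev
keys = ["premise", "hypothesis", "label"]
dev_triples = set(dev_df[keys].itertuples(index=False, name=None))
is_dup = train_df[keys].apply(tuple, axis=1).isin(dev_triples)

# rebuild the train split without the overlapping rows, keeping the original features
dedup_train = Dataset.from_pandas(
    train_df[~is_dup], features=conj_nli["train"].features, preserve_index=False
)
print(len(conj_nli["train"]) - len(dedup_train))  # expected: 2
```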