Commit 31bca25
Giguru Scheuer committed
1 Parent(s): 09c7ae3

Got it working

Files changed (2)
  1. canard_quretec.py +2 -8
  2. test.py +2 -0
canard_quretec.py CHANGED
@@ -12,12 +12,7 @@
 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 # See the License for the specific language governing permissions and
 # limitations under the License.
-
-
-import csv
 import json
-import os
-
 import datasets
 
 
@@ -48,7 +43,7 @@ _LICENSE = "CC BY-SA 4.0"
 
 # The HuggingFace dataset library don't host the datasets but only point to the original files
 # This can be an arbitrary nested dict/list of URLs (see below in `_split_generators` method)
-_URL = "https://drive.google.com/drive/folders/1e3s-V6VQqOKHrmn_kBStNsV0gGHPeJVf/"
+_URL = "https://huggingface.co/datasets/uva-irlab/canard_quretec/resolve/main/"
 _URLs = {
     'gold_supervision': {
         'train': _URL+"train_gold_supervision.json",
@@ -108,7 +103,7 @@ class CanardQuretec(datasets.GeneratorBasedBuilder):
                 "overlapping_terms": datasets.features.Sequence(feature=datasets.Value('string')),
                 "answer_text_with_window": datasets.Value("string"),
                 "answer_text": datasets.Value("string"),
-                "bert_ner_overlap": datasets.Array2D(shape=(2,), dtype="string")
+                "bert_ner_overlap": datasets.features.Sequence(feature=datasets.features.Sequence(feature=datasets.Value('string')))
             }
         )
         return datasets.DatasetInfo(
@@ -168,7 +163,6 @@ class CanardQuretec(datasets.GeneratorBasedBuilder):
         """ Yields examples as (key, example) tuples. """
        # This method handles input defined in _split_generators to yield (key, example) tuples from the dataset.
        # The `key` is here for legacy reason (tfds) and is not important in itself.
-
         with open(filepath) as f:
             data_array = json.load(f)
             for id_, item_dict in data_array:
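Two changes do the work here: the download URL now points at files hosted in the dataset repository itself rather than a Google Drive folder, and `bert_ner_overlap` is declared as a `Sequence` of string `Sequence`s instead of `datasets.Array2D`. `Array2D` is meant for fixed-shape arrays (and `shape=(2,)` specifies only one dimension), which is presumably why the earlier declaration did not load; the nested `Sequence` accepts string lists of varying length. A minimal sketch of how that schema behaves, with invented example values (the real field contents come from the QuReTeC preprocessing and are not shown in this diff):

# Sketch only (not part of the commit): the nested Sequence feature accepts
# ragged lists of string lists. Example values below are invented.
import datasets

features = datasets.Features({
    "answer_text": datasets.Value("string"),
    "bert_ner_overlap": datasets.features.Sequence(
        feature=datasets.features.Sequence(feature=datasets.Value("string"))
    ),
})

ds = datasets.Dataset.from_dict(
    {
        "answer_text": ["some answer text"],
        # one example: a list of string lists, inner lengths may differ
        "bert_ner_overlap": [[["token_a", "token_b"], ["label_a"]]],
    },
    features=features,
)
print(ds.features["bert_ner_overlap"])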
test.py ADDED
@@ -0,0 +1,2 @@
+from datasets import load_dataset
+dataset = load_dataset('canard_quretec.py')
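The added test loads the script with its default configuration. Since canard_quretec.py defines named configurations (`gold_supervision` appears in the URL dict above), a specific one can be requested as well; the sketch below assumes the script produces a `train` split, as the URL dict suggests:

from datasets import load_dataset

# Request a specific configuration of the local dataset script.
# 'gold_supervision' is taken from the _URLs dict in canard_quretec.py;
# other configurations defined there can be passed the same way.
dataset = load_dataset('canard_quretec.py', 'gold_supervision')

print(dataset)              # DatasetDict with the generated splits
print(dataset['train'][0])  # first example, assuming a 'train' split exists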