MHoubre committed
Commit 01caa65
1 Parent(s): 11d6332

update for dataset name

Files changed (2)
  1. dataset_infos.json +1 -1
  2. kp20k.py +4 -16
dataset_infos.json CHANGED
@@ -1 +1 @@
- {"KP20k": {"description": "", "citation": "", "homepage": "", "license": "", "features": {"id": {"dtype": "string", "id": null, "_type": "Value"},"title": {"dtype": "string", "id": null, "_type": "Value"},"abstract": {"dtype": "string", "id": null, "_type": "Value"}, "keyphrases": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "prmu": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "json", "config_name": "KP20k", "version": {"version_str": "0.0.0", "description": null, "major": 0, "minor": 0, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 654714676, "num_examples": 530809, "dataset_name": "json"}, "test": {"name": "test", "num_bytes": 24675779, "num_examples": 20000, "dataset_name": "json"}, "validation": {"name": "validation", "num_bytes": 24657665, "num_examples": 20000, "dataset_name": "json"}}, "download_size": 720581004, "post_processing_size": null, "dataset_size": 704048120, "size_in_bytes": 1424629124}, "KP20k": {"description": "", "citation": "", "homepage": "", "license": "", "features": {"abstract": {"dtype": "string", "id": null, "_type": "Value"}, "title": {"dtype": "string", "id": null, "_type": "Value"}, "id": {"dtype": "string", "id": null, "_type": "Value"}, "keyphrases": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "prmu": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "json", "config_name": "KP20k", "version": {"version_str": "0.0.0", "description": null, "major": 0, "minor": 0, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 654714676, "num_examples": 530809, "dataset_name": "json"}, "test": {"name": "test", "num_bytes": 24675779, "num_examples": 20000, "dataset_name": "json"}, "validation": {"name": "validation", "num_bytes": 24657665, "num_examples": 20000, "dataset_name": "json"}}, "download_size": 720581004, "post_processing_size": null, "dataset_size": 704048120, "size_in_bytes": 1424629124}}
+ {"raw": {"description": "KP20k dataset for keyphrase extraction and generation in scientific paper.\n", "citation": "@InProceedings{meng-EtAl:2017:Long,\n author = {Meng, Rui and Zhao, Sanqiang and Han, Shuguang and He, Daqing and Brusilovsky, Peter and Chi, Yu},\n title = {Deep Keyphrase Generation},\n booktitle = {Proceedings of the 55th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers)},\n month = {July},\n year = {2017},\n address = {Vancouver, Canada},\n publisher = {Association for Computational Linguistics},\n pages = {582--592},\n url = {http://aclweb.org/anthology/P17-1054}\n}\n", "homepage": "http://memray.me/uploads/acl17-keyphrase-generation.pdf", "license": "MIT LICENSE", "features": {"id": {"dtype": "string", "id": null, "_type": "Value"}, "title": {"dtype": "string", "id": null, "_type": "Value"}, "abstract": {"dtype": "string", "id": null, "_type": "Value"}, "keyphrases": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "prmu": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "kp20k", "config_name": "raw", "version": {"version_str": "0.0.1", "description": "", "major": 0, "minor": 0, "patch": 1}, "splits": {"train": {"name": "train", "num_bytes": 654714676, "num_examples": 530809, "dataset_name": "kp20k"}, "test": {"name": "test", "num_bytes": 24675779, "num_examples": 20000, "dataset_name": "kp20k"}, "validation": {"name": "validation", "num_bytes": 24657665, "num_examples": 20000, "dataset_name": "kp20k"}}, "download_checksums": {"test.json": {"num_bytes": 25255559, "checksum": "3b6caeb55eaf941deb11f9e5152494310db2ac5970194e722798e3e035855561"}, "train.json": {"num_bytes": 670087948, "checksum": "59a20765c76126945e9eb298d7837175e886403f113c1a23c0cab7dc3cd9496d"}, "validation.json": {"num_bytes": 25237497, "checksum": "1c0ec7541c24c81b44c11c8cc5a0cbda88956a39bf94552dd03dcdf7fb25dd67"}}, "download_size": 720581004, "post_processing_size": null, "dataset_size": 704048120, "size_in_bytes": 1424629124}}
kp20k.py CHANGED
@@ -1,13 +1,9 @@
 import csv
 import json
 import os
-
-logger = datasets.logging.get_logger(__name__)
-
 import datasets


-
 _CITATION = """\
 @InProceedings{meng-EtAl:2017:Long,
   author = {Meng, Rui and Zhao, Sanqiang and Han, Shuguang and He, Daqing and Brusilovsky, Peter and Chi, Yu},
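
(Note: the removed `logger = datasets.logging.get_logger(__name__)` assignment sat above `import datasets`, so it referenced the module before it existed and would have raised a NameError as soon as the script was loaded; since no logging call appears in the hunks shown, the commit simply drops it rather than reordering the imports.)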
@@ -41,11 +37,7 @@ _URLS = {
     "validation": "validation.json"
 }

-class KP20kConfig(datasets.BuilderConfig):

-    def __init__(self, **kwargs):
-
-        super(KP20kConfig, self).__init__(**kwargs)

 # TODO: Name of the dataset usually match the script name with CamelCase instead of snake_case
 class KP20k(datasets.GeneratorBasedBuilder):
@@ -64,11 +56,7 @@ class KP20k(datasets.GeneratorBasedBuilder):
     # data = datasets.load_dataset('my_dataset', 'first_domain')
     # data = datasets.load_dataset('my_dataset', 'second_domain')
     BUILDER_CONFIGS = [
-        KP20kConfig(
-            name="raw",
-            version=VERSION,
-            description="This part of the dataset covers the raw data.",
-        ),
+        datasets.BuilderConfig(name="raw", version=VERSION, description="This part of my dataset covers the raw data"),
     ]

     #DEFAULT_CONFIG_NAME = "raw"  # It's not mandatory to have a default configuration. Just use one if it make sense.
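
The deleted `KP20kConfig` subclass (previous hunk) only forwarded its keyword arguments to `BuilderConfig.__init__`, so it added nothing over the base class, and the commit instantiates `datasets.BuilderConfig` directly. A sketch of the equivalent declaration, assuming `VERSION` is `datasets.Version("0.0.1")` as recorded in dataset_infos.json:

    import datasets

    VERSION = datasets.Version("0.0.1")  # assumed; matches version_str in dataset_infos.json

    BUILDER_CONFIGS = [
        # The stock BuilderConfig already accepts name/version/description,
        # so a pass-through subclass is unnecessary.
        datasets.BuilderConfig(
            name="raw",
            version=VERSION,
            description="This part of my dataset covers the raw data",
        ),
    ]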
@@ -111,7 +99,7 @@ class KP20k(datasets.GeneratorBasedBuilder):
                 name=datasets.Split.TRAIN,
                 # These kwargs will be passed to _generate_examples
                 gen_kwargs={
-                    "filepath": os.path.join(data_dir,"train.json"),
+                    "filepath": os.path.join(data_dir["train"]),
                     "split": "train",
                 },
             ),
@@ -119,7 +107,7 @@ class KP20k(datasets.GeneratorBasedBuilder):
                 name=datasets.Split.TEST,
                 # These kwargs will be passed to _generate_examples
                 gen_kwargs={
-                    "filepath": os.path.join(data_dir,"test.json"),
+                    "filepath": os.path.join(data_dir["test"]),
                     "split": "test"
                 },
             ),
@@ -127,7 +115,7 @@ class KP20k(datasets.GeneratorBasedBuilder):
                 name=datasets.Split.VALIDATION,
                 # These kwargs will be passed to _generate_examples
                 gen_kwargs={
-                    "filepath": os.path.join(data_dir,"validation.json"),
+                    "filepath": os.path.join(data_dir["validation"]),
                     "split": "validation",
                 },
             ),
 
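All three `filepath` changes follow from how `_URLS` is resolved: `dl_manager.download_and_extract`, when called on a dict, returns a dict with the same keys whose values are local paths, so `data_dir["train"]` is already a usable file path. (The remaining single-argument `os.path.join(...)` is a no-op and could be dropped altogether.) A minimal sketch of the resulting `_split_generators`, assuming the `_URLS` mapping shown above; an illustration of the pattern, not the exact committed code:

    import datasets

    _URLS = {
        "train": "train.json",
        "test": "test.json",
        "validation": "validation.json",
    }

    def _split_generators(self, dl_manager):
        # download_and_extract on a dict returns a dict with the same keys,
        # each value being the local path of the downloaded file.
        data_dir = dl_manager.download_and_extract(_URLS)
        return [
            datasets.SplitGenerator(
                name=split,
                gen_kwargs={"filepath": data_dir[key], "split": key},
            )
            for key, split in [
                ("train", datasets.Split.TRAIN),
                ("test", datasets.Split.TEST),
                ("validation", datasets.Split.VALIDATION),
            ]
        ]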