Datasets:

Modalities:
Text
Libraries:
Datasets
License:
parquet-converter committed on
Commit
ac398f4
1 Parent(s): 1c2747b

Update parquet files

Browse files
.gitattributes DELETED
@@ -1,38 +0,0 @@
1
- *.7z filter=lfs diff=lfs merge=lfs -text
2
- *.arrow filter=lfs diff=lfs merge=lfs -text
3
- *.bin filter=lfs diff=lfs merge=lfs -text
4
- *.bin.* filter=lfs diff=lfs merge=lfs -text
5
- *.bz2 filter=lfs diff=lfs merge=lfs -text
6
- *.ftz filter=lfs diff=lfs merge=lfs -text
7
- *.gz filter=lfs diff=lfs merge=lfs -text
8
- *.h5 filter=lfs diff=lfs merge=lfs -text
9
- *.joblib filter=lfs diff=lfs merge=lfs -text
10
- *.lfs.* filter=lfs diff=lfs merge=lfs -text
11
- *.model filter=lfs diff=lfs merge=lfs -text
12
- *.msgpack filter=lfs diff=lfs merge=lfs -text
13
- *.onnx filter=lfs diff=lfs merge=lfs -text
14
- *.ot filter=lfs diff=lfs merge=lfs -text
15
- *.parquet filter=lfs diff=lfs merge=lfs -text
16
- *.pb filter=lfs diff=lfs merge=lfs -text
17
- *.pt filter=lfs diff=lfs merge=lfs -text
18
- *.pth filter=lfs diff=lfs merge=lfs -text
19
- *.rar filter=lfs diff=lfs merge=lfs -text
20
- saved_model/**/* filter=lfs diff=lfs merge=lfs -text
21
- *.tar.* filter=lfs diff=lfs merge=lfs -text
22
- *.tflite filter=lfs diff=lfs merge=lfs -text
23
- *.tgz filter=lfs diff=lfs merge=lfs -text
24
- *.wasm filter=lfs diff=lfs merge=lfs -text
25
- *.xz filter=lfs diff=lfs merge=lfs -text
26
- *.zip filter=lfs diff=lfs merge=lfs -text
27
- *.zstandard filter=lfs diff=lfs merge=lfs -text
28
- *tfevents* filter=lfs diff=lfs merge=lfs -text
29
- # Audio files - uncompressed
30
- *.pcm filter=lfs diff=lfs merge=lfs -text
31
- *.sam filter=lfs diff=lfs merge=lfs -text
32
- *.raw filter=lfs diff=lfs merge=lfs -text
33
- # Audio files - compressed
34
- *.aac filter=lfs diff=lfs merge=lfs -text
35
- *.flac filter=lfs diff=lfs merge=lfs -text
36
- *.mp3 filter=lfs diff=lfs merge=lfs -text
37
- *.ogg filter=lfs diff=lfs merge=lfs -text
38
- *.wav filter=lfs diff=lfs merge=lfs -text
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
README.md DELETED
@@ -1,3 +0,0 @@
1
- ---
2
- license: cc-by-sa-4.0
3
- ---
 
 
 
 
SDU-test.py DELETED
@@ -1,90 +0,0 @@
import os

import datasets
from typing import List
import json

logger = datasets.logging.get_logger(__name__)


_CITATION = """
"""

_DESCRIPTION = """
This is the dataset repository for SDU Dataset from SDU workshop at AAAI22.
The dataset can help build sequence labelling models for the task Abbreviation Detection.
"""


class SDUtestConfig(datasets.BuilderConfig):
    """BuilderConfig for the SDU test dataset."""

    def __init__(self, **kwargs):
        """BuilderConfig for the SDU test dataset.

        Args:
            **kwargs: keyword arguments forwarded to super.
        """
        super(SDUtestConfig, self).__init__(**kwargs)


class SDUtest(datasets.GeneratorBasedBuilder):
    """SDU Filtered dataset.

    NOTE(review): in the original script this builder class was *also* named
    ``SDUtestConfig``, shadowing the BuilderConfig subclass defined above.
    Renamed to ``SDUtest`` so both classes remain accessible; the single
    GeneratorBasedBuilder subclass is what the datasets loader picks up.
    """

    BUILDER_CONFIGS = [
        SDUtestConfig(name="SDUtest", version=datasets.Version("0.0.2"), description="SDU test dataset"),
    ]

    def _info(self):
        """Describe the dataset: each example has an id, its tokens, and BIO ner_tags."""
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(
                {
                    "id": datasets.Value("string"),
                    "tokens": datasets.Sequence(datasets.Value("string")),
                    "ner_tags": datasets.Sequence(
                        datasets.features.ClassLabel(
                            names=[
                                "B-O",
                                "B-AC",
                                "I-AC",
                                "B-LF",
                                "I-LF",
                            ]
                        )
                    ),
                }
            ),
            supervised_keys=None,
            homepage="",
            citation=_CITATION,
        )

    _URL = "https://huggingface.co/datasets/surrey-nlp/SDU-test/raw/main/"
    # Only a combined train+dev file is published for this repo; the dev/test
    # PLOS files referenced in earlier revisions are not available here.
    _URLS = {
        "train+dev": _URL + "sdu_data_trunc.json",
    }

    def _split_generators(self, dl_manager: datasets.DownloadManager) -> List[datasets.SplitGenerator]:
        """Download the source JSON and expose it as the TRAIN split."""
        urls_to_download = self._URLS
        downloaded_files = dl_manager.download_and_extract(urls_to_download)

        return [
            datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"filepath": downloaded_files["train+dev"]}),
        ]

    def _generate_examples(self, filepath):
        """Yield (key, example) pairs read from the JSON file at *filepath*.

        The file is expected to be a JSON array of objects with ``id``,
        ``tokens`` and ``ner_tags`` fields — TODO confirm against the data.
        """
        logger.info("generating examples from = %s", filepath)
        with open(filepath, encoding="utf-8") as f:
            plod = json.load(f)
        # `record` renamed from `object`, which shadowed the builtin.
        for record in plod:
            id_ = int(record['id'])
            yield id_, {
                "id": str(id_),
                "tokens": record['tokens'],
                "ner_tags": record['ner_tags'],
            }
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
SDUtest/sdu-test-train.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:cd4c9810991a8edca4f07fb984a8f202a1704e1ba7477a09c218bcb87c5b0d3b
3
+ size 1406748
sdu_data_trunc.json DELETED
The diff for this file is too large to render. See raw diff