# coding=utf-8
# Copyright 2024 HuggingFace Datasets Authors.
# Lint as: python3
"""The Shipping Label Dataset."""

import datasets

logger = datasets.logging.get_logger(__name__)


_CITATION = """
"""

_DESCRIPTION = """

The goal of this task is to provide a dataset for name entity recognition."""
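
# The raw data files are expected to be in a CoNLL-style layout: one
# "token label" pair per line, with blank lines separating sentences
# (this is the format that _generate_examples below parses).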

_URL = "https://raw.githubusercontent.com/SanghaviHarshPankajkumar/shipping_label_project/main/NER/data/"
_TRAINING_FILE = "train.txt"
_VAL_FILE = "val.txt"
_TEST_FILE = "test.txt"


class ShippingLabelsConfig(datasets.BuilderConfig):
    """BuilderConfig for the Shipping Label NER dataset."""

    def __init__(self, **kwargs):
        """BuilderConfig for Shipping Label data.
        Args:
          **kwargs: keyword arguments forwarded to super.
        """
        super().__init__(**kwargs)


class ShippingLabelNer(datasets.GeneratorBasedBuilder):
    """Shipping Label Dataset for NER."""

    BUILDER_CONFIGS = [
        ShippingLabelsConfig(
            name="shipping_label_ner", version=datasets.Version("1.0.0"), description="Shipping Label Dataset for NER"
        ),
    ]

    def _info(self):
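        # Schema: each example carries an id, a token sequence, and a parallel
        # sequence of BIO-style NER tags drawn from the fixed label set below.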
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(
                {
                    "id": datasets.Value("string"),
                    "tokens": datasets.Sequence(datasets.Value("string")),
                    "ner_tags": datasets.Sequence(
                        datasets.features.ClassLabel(
                            names=[
                                "O",
                                "B-GCNUM",
                                "I-GCNUM",
                                "B-TRACK-ID",
                                "I-TRACK-ID"
                            ]
                        )
                    ),
                }
            ),
            supervised_keys=None,
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        """Returns SplitGenerators."""
        urls_to_download = {
            "train": f"{_URL}{_TRAINING_FILE}",
            "test": f"{_URL}{_TEST_FILE}",
            "val": f"{_URL}{_VAL_FILE}",
        }
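        # download_and_extract returns local (cached) paths, keyed like urls_to_download.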
        downloaded_files = dl_manager.download_and_extract(urls_to_download)

        return [
            datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"filepath": downloaded_files["train"]}),
            datasets.SplitGenerator(name=datasets.Split.TEST, gen_kwargs={"filepath": downloaded_files["test"]}),
            datasets.SplitGenerator(name=datasets.Split.VALIDATION, gen_kwargs={"filepath": downloaded_files["val"]}),
        ]

    def _generate_examples(self, filepath):
        logger.info("⏳ Generating examples from = %s", filepath)
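        # Parse CoNLL-style lines: accumulate "token label" pairs until a blank
        # line is reached, then emit the accumulated sentence as one example.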
        with open(filepath, encoding="utf-8") as f:
            current_tokens = []
            current_labels = []
            sentence_counter = 0
            for row in f:
                row = row.rstrip()
                if row:
                    token, label = row.split(" ")
                    current_tokens.append(token)
                    current_labels.append(label)
                else:
                    # New sentence
                    if not current_tokens:
                        # Consecutive empty lines will cause empty sentences
                        continue
                    assert len(current_tokens) == len(current_labels), "💔 between len of tokens & labels"
                    sentence = (
                        sentence_counter,
                        {
                            "id": str(sentence_counter),
                            "tokens": current_tokens,
                            "ner_tags": current_labels,
                        },
                    )
                    sentence_counter += 1
                    current_tokens = []
                    current_labels = []
                    yield sentence
            # Don't forget last sentence in dataset 🧐
            if current_tokens:
                yield sentence_counter, {
                    "id": str(sentence_counter),
                    "tokens": current_tokens,
                    "ner_tags": current_labels,
                }
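

# A minimal usage sketch (hypothetical local path; not part of the original script).
# Recent versions of the datasets library may require trust_remote_code=True to
# load a local script:
#
#   from datasets import load_dataset
#
#   ds = load_dataset("path/to/shipping_label_ner.py")
#   print(ds["train"][0])   # {"id": "0", "tokens": [...], "ner_tags": [...]}
#   label_names = ds["train"].features["ner_tags"].feature.names
#   print(label_names)      # ["O", "B-GCNUM", "I-GCNUM", "B-TRACK-ID", "I-TRACK-ID"]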