import json
import os
from glob import glob

import datasets

_DESCRIPTION = """\
    Watermark Dataset
"""

_VERSION = datasets.Version("1.0.0")


class WatermarkPitaConfig(datasets.BuilderConfig):
    """Builder Config for Food-101"""

    def __init__(self, repository, urls, categories, **kwargs):
        """BuilderConfig for Food-101.
        
        Args:
            repository: `string`, the name of the repository.
            urls: `dict<string, string>`, the urls to the data.
            categories: `list<string>`, the categories of the data.
          
            **kwargs: keyword arguments forwarded to super.
        """
        _VERSION = datasets.Version("1.0.0")

        super(WatermarkPitaConfig, self).__init__(version=_VERSION, **kwargs)
        self.repository = repository
        self.urls = categories
        self.categories = categories


class WatermarkPita(datasets.GeneratorBasedBuilder):
    """Watermark Dataset"""

    BUILDER_CONFIGS = [
        WatermarkPitaConfig(
            name="text",
            repository="data",
            urls={"train": "data/train.zip", "valid": "data/valid.zip"},
            categories=["text"],
        )
    ]

    DEFAULT_CONFIG_NAME = "text"

    def _info(self):
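        """Return dataset metadata. Each example is an image together with a
        sequence of objects, where every object has a class label and a
        bounding box of four integers."""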
        return datasets.DatasetInfo(
            features=datasets.Features(
                {
                    "image": datasets.Image(),
                    "objects": datasets.Sequence(
                        {
                            "label": datasets.ClassLabel(names=self.config.categories),
                            "bbox": datasets.Sequence(
                                datasets.Value("int32"), length=4
                            ),
                        }
                    ),
                }
            ),
            description=_DESCRIPTION,
        )

    def _split_generators(self, dl_manager):
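        """Download and extract the configured train/valid archives and define
        one split generator per split."""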
        data_dir = dl_manager.download_and_extract(self.config.urls)

        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={"split": "train", "data_dir": data_dir["train"]},
            ),
            datasets.SplitGenerator(
                name=datasets.Split.VALIDATION,
                gen_kwargs={"split": "valid", "data_dir": data_dir["valid"]},
            ),
        ]

    def _generate_examples(self, split, data_dir):
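        """Yield (key, example) pairs by pairing each image in `images/` with
        the JSON label file at the same sorted position in `labels/`. Each
        label file is expected to hold a single annotation with `label`, `x`,
        `y`, `width`, and `height` keys."""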
        image_dir = os.path.join(data_dir, "images")
        label_dir = os.path.join(data_dir, "labels")

        image_paths = sorted(glob(os.path.join(image_dir, "*.jpg")))
        label_paths = sorted(glob(os.path.join(label_dir, "*.json")))

        for idx, (image_path, label_path) in enumerate(zip(image_paths, label_paths)):
            with open(label_path, "r") as f:
                bbox = json.load(f)

            objects = []
            objects.append(
                {
                    "label": bbox["label"],
                    "bbox": [
                        bbox["x"],
                        bbox["y"],
                        bbox["width"],
                        bbox["height"],
                    ],
                }
            )

            yield idx, {"image": image_path, "objects": objects}
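

# Minimal usage sketch (assumption: a `data/` directory with `train.zip` and
# `valid.zip`, as declared in BUILDER_CONFIGS, sits next to this script).
if __name__ == "__main__":
    # Load the "text" configuration and inspect the first training example:
    # an image plus the sequence of watermark objects (label + bbox).
    ds = datasets.load_dataset(__file__, name="text")
    print(ds["train"][0]["objects"])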