bastienp committed on
Commit a583900
1 Parent(s): be7e74f

chore: remove script and use automatic dataset

.gitattributes DELETED
@@ -1,55 +0,0 @@
- *.7z filter=lfs diff=lfs merge=lfs -text
- *.arrow filter=lfs diff=lfs merge=lfs -text
- *.bin filter=lfs diff=lfs merge=lfs -text
- *.bz2 filter=lfs diff=lfs merge=lfs -text
- *.ckpt filter=lfs diff=lfs merge=lfs -text
- *.ftz filter=lfs diff=lfs merge=lfs -text
- *.gz filter=lfs diff=lfs merge=lfs -text
- *.h5 filter=lfs diff=lfs merge=lfs -text
- *.joblib filter=lfs diff=lfs merge=lfs -text
- *.lfs.* filter=lfs diff=lfs merge=lfs -text
- *.lz4 filter=lfs diff=lfs merge=lfs -text
- *.mlmodel filter=lfs diff=lfs merge=lfs -text
- *.model filter=lfs diff=lfs merge=lfs -text
- *.msgpack filter=lfs diff=lfs merge=lfs -text
- *.npy filter=lfs diff=lfs merge=lfs -text
- *.npz filter=lfs diff=lfs merge=lfs -text
- *.onnx filter=lfs diff=lfs merge=lfs -text
- *.ot filter=lfs diff=lfs merge=lfs -text
- *.parquet filter=lfs diff=lfs merge=lfs -text
- *.pb filter=lfs diff=lfs merge=lfs -text
- *.pickle filter=lfs diff=lfs merge=lfs -text
- *.pkl filter=lfs diff=lfs merge=lfs -text
- *.pt filter=lfs diff=lfs merge=lfs -text
- *.pth filter=lfs diff=lfs merge=lfs -text
- *.rar filter=lfs diff=lfs merge=lfs -text
- *.safetensors filter=lfs diff=lfs merge=lfs -text
- saved_model/**/* filter=lfs diff=lfs merge=lfs -text
- *.tar.* filter=lfs diff=lfs merge=lfs -text
- *.tar filter=lfs diff=lfs merge=lfs -text
- *.tflite filter=lfs diff=lfs merge=lfs -text
- *.tgz filter=lfs diff=lfs merge=lfs -text
- *.wasm filter=lfs diff=lfs merge=lfs -text
- *.xz filter=lfs diff=lfs merge=lfs -text
- *.zip filter=lfs diff=lfs merge=lfs -text
- *.zst filter=lfs diff=lfs merge=lfs -text
- *tfevents* filter=lfs diff=lfs merge=lfs -text
- # Audio files - uncompressed
- *.pcm filter=lfs diff=lfs merge=lfs -text
- *.sam filter=lfs diff=lfs merge=lfs -text
- *.raw filter=lfs diff=lfs merge=lfs -text
- # Audio files - compressed
- *.aac filter=lfs diff=lfs merge=lfs -text
- *.flac filter=lfs diff=lfs merge=lfs -text
- *.mp3 filter=lfs diff=lfs merge=lfs -text
- *.ogg filter=lfs diff=lfs merge=lfs -text
- *.wav filter=lfs diff=lfs merge=lfs -text
- # Image files - uncompressed
- *.bmp filter=lfs diff=lfs merge=lfs -text
- *.gif filter=lfs diff=lfs merge=lfs -text
- *.png filter=lfs diff=lfs merge=lfs -text
- *.tiff filter=lfs diff=lfs merge=lfs -text
- # Image files - compressed
- *.jpg filter=lfs diff=lfs merge=lfs -text
- *.jpeg filter=lfs diff=lfs merge=lfs -text
- *.webp filter=lfs diff=lfs merge=lfs -text

README.md CHANGED
@@ -5,6 +5,11 @@ tags:
  - watermak
  - computer-vision
  - object-detection
+ configs:
+ - config_name: default
+   data_files:
+   - split: train
+     path: "data/train"
  ---
  # Dataset Card for Dataset Name

@@ -18,8 +23,6 @@ This dataset card aims to be a base template for new datasets. It has been gener

  <!-- Provide a longer summary of what this dataset is. -->

-
-
  - **Curated by:** [More Information Needed]
  - **Funded by [optional]:** [More Information Needed]
  - **Shared by [optional]:** [More Information Needed]
@@ -142,4 +145,4 @@ Users should be made aware of the risks, biases and limitations of the dataset.

  ## Dataset Card Contact

- [More Information Needed]
+ [More Information Needed]
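
With the `configs` block added above, the `datasets` library and the Hub viewer can build the train split automatically from the files under `data/train`, which is what makes the loading script removable. A minimal loading sketch, assuming the repository id is `bastienp/visible-watermark-pita` (inferred from the commit author and the deleted script's name):

```python
from datasets import load_dataset

# Repository id is an assumption; replace it with the actual Hub dataset id.
ds = load_dataset("bastienp/visible-watermark-pita", split="train")

# Each record is produced by the automatic loader from the files in data/train.
print(ds[0])
```
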
data/text/train.zip DELETED
@@ -1,3 +0,0 @@
- version https://git-lfs.github.com/spec/v1
- oid sha256:193fa1a526de000aabfaa23881755180ac8cf5ddea28fa3813d25301037c5c0c
- size 1586286

data/{text/valid.zip → train/watermarked_train_27.jpg} RENAMED
File without changes
data/train/watermarked_train_27.json ADDED
@@ -0,0 +1 @@
+ [{"label": "text", "bbox": [68, 92, 80, 223]}]
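
Each training image now has a JSON sidecar like the one above: a list of objects, each with a `label` and a `bbox`. A small sketch for overlaying the boxes on the matching image; the `[x_min, y_min, x_max, y_max]` coordinate order is an assumption, since the commit does not state the convention:

```python
import json
from PIL import Image, ImageDraw

# File names follow the new data/train layout introduced in this commit.
image_path = "data/train/watermarked_train_27.jpg"
label_path = "data/train/watermarked_train_27.json"

with open(label_path) as f:
    objects = json.load(f)  # e.g. [{"label": "text", "bbox": [68, 92, 80, 223]}]

image = Image.open(image_path).convert("RGB")
draw = ImageDraw.Draw(image)
for obj in objects:
    # bbox assumed to be [x_min, y_min, x_max, y_max] in pixels.
    draw.rectangle(obj["bbox"], outline="red", width=2)
image.save("watermarked_train_27_annotated.jpg")
```
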
visible-watermark-pita.py DELETED
@@ -1,99 +0,0 @@
- import os
- from glob import glob
-
- import datasets
- import json
- from PIL import Image
-
- _DESCRIPTION = """\
- Watermark Dataset
- """
-
- _VERSION = datasets.Version("1.0.0")
-
-
- class WatermarkPitaConfig(datasets.BuilderConfig):
-     """Builder Config for Food-101"""
-
-     def __init__(self, urls, categories, **kwargs):
-         """BuilderConfig for Food-101.
-
-         Args:
-             repository: `string`, the name of the repository.
-             urls: `dict<string, string>`, the urls to the data.
-             categories: `list<string>`, the categories of the data.
-
-             **kwargs: keyword arguments forwarded to super.
-         """
-         _VERSION = datasets.Version("1.0.0")
-
-         super(WatermarkPitaConfig, self).__init__(version=_VERSION, **kwargs)
-         self.urls = urls
-         self.categories = categories
-
-
- class WatermarkPita(datasets.GeneratorBasedBuilder):
-     """Watermark Dataset"""
-
-     BUILDER_CONFIGS = [
-         WatermarkPitaConfig(
-             name="text",
-             urls={"train": "data/text/train.zip", "valid": "data/text/valid.zip"},
-             categories=["text"],
-         ),
-         WatermarkPitaConfig(
-             name="logo",
-             urls={"train": "data/logo/train.zip", "valid": "data/logo/valid.zip"},
-             categories=["logo"],
-         ),
-         WatermarkPitaConfig(
-             name="mixed",
-             urls={"train": "data/mixed/train.zip", "valid": "data/mixed/valid.zip"},
-             categories=["logo", "text"],
-         ),
-     ]
-
-     DEFAULT_CONFIG_NAME = "text"
-
-     def _info(self):
-         return datasets.DatasetInfo(
-             features=datasets.Features(
-                 {
-                     "image": datasets.Image(),
-                     "objects": datasets.Sequence(
-                         {
-                             "label": datasets.ClassLabel(names=self.config.categories),
-                             "bbox": datasets.features.Sequence(datasets.Value("int32"), length=4),
-                         }
-                     ),
-                 }
-             ),
-             description=_DESCRIPTION,
-         )
-
-     def _split_generators(self, dl_manager):
-         data_dir = dl_manager.download_and_extract(self.config.urls)
-
-         return [
-             datasets.SplitGenerator(
-                 name=datasets.Split.TRAIN,
-                 gen_kwargs={"split": "train", "data_dir": data_dir["train"]},
-             ),
-             datasets.SplitGenerator(
-                 name=datasets.Split.VALIDATION,
-                 gen_kwargs={"split": "valid", "data_dir": data_dir["valid"]},
-             ),
-         ]
-
-     def _generate_examples(self, split, data_dir):
-         image_dir = os.path.join(data_dir, "images")
-         label_dir = os.path.join(data_dir, "labels")
-
-         image_paths = sorted(glob(image_dir + "/*.jpg"))
-         label_paths = sorted(glob(label_dir + "/*.json"))
-
-         for idx, (image_path, label_path) in enumerate(zip(image_paths, label_paths)):
-             with open(label_path, "r") as f:
-                 bboxes = json.load(f)
-
-             yield idx, {"image": image_path, "objects": bboxes}
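
Before this commit, the dataset had to be loaded through this script by selecting one of the `text`, `logo`, or `mixed` configs defined in `BUILDER_CONFIGS`. Roughly, and again assuming the repository id, the old call looked like the sketch below (recent `datasets` versions also require `trust_remote_code=True` to run a loading script):

```python
from datasets import load_dataset

# Old, script-based loading; the config name comes from BUILDER_CONFIGS above.
# The repository id is an assumption.
ds = load_dataset("bastienp/visible-watermark-pita", "text", trust_remote_code=True)
train, valid = ds["train"], ds["validation"]
```
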