whyen-wang commited on
Commit
b012d38
1 Parent(s): 9121593
.gitignore ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ annotations/
2
+ annotations_trainval2017.zip
3
+ *.jsonl
README.md CHANGED
@@ -1,3 +1,237 @@
1
  ---
2
  license: cc-by-4.0
 
 
 
 
 
 
 
3
  ---
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
  ---
2
  license: cc-by-4.0
3
+ size_categories:
4
+ - 100K<n<1M
5
+ task_categories:
6
+ - object-detection
7
+ language:
8
+ - en
9
+ pretty_name: COCO Detection
10
  ---
11
+
12
+ # Dataset Card for "COCO Detection"
13
+
14
+ ## Quick Start
15
+ ### Usage
16
+ ```python
17
+ >>> from datasets import load_dataset
18
+
19
+ >>> dataset = load_dataset('whyen-wang/coco_detection')
20
+ >>> example = dataset['train'][500]
21
+ >>> print(example)
22
+ {'image': <PIL.JpegImagePlugin.JpegImageFile image mode=RGB size=640x426>,
23
+ 'bboxes': [
24
+ [192.4199981689453, 220.17999267578125,
25
+ 129.22999572753906, 148.3800048828125],
26
+ [76.94000244140625, 146.6300048828125,
27
+ 104.55000305175781, 109.33000183105469],
28
+ [302.8800048828125, 115.2699966430664,
29
+ 99.11000061035156, 119.2699966430664],
30
+ [0.0, 0.800000011920929,
31
+ 592.5700073242188, 420.25]],
32
+ 'categories': [46, 46, 46, 55],
33
+ 'inst.rles': {
34
+ 'size': [[426, 640], [426, 640], [426, 640], [426, 640]],
35
+ 'counts': [
36
+ 'gU`2b0d;...', 'RXP16m<=...', ']Xn34S=4...', 'n:U2o8W2...'
37
+ ]}}
38
+ ```
39
+
40
+ ### Visualization
41
+ ```python
42
+ >>> import cv2
43
+ >>> import numpy as np
44
+ >>> from PIL import Image
+ >>> from pycocotools import mask as maskUtils
45
+
46
+ >>> def transforms(examples):
47
+ inst_rles = examples.pop('inst.rles')
48
+ annotation = []
49
+ for i in inst_rles:
50
+         rles = [
+             {'size': size, 'counts': counts}
+             for size, counts in zip(i['size'], i['counts'])
+         ]
+         annotation.append(maskUtils.decode(rles))
55
+ examples['annotation'] = annotation
56
+ return examples
57
+
58
+ >>> def visualize(example, names, colors):
59
+ image = np.array(example['image'])
60
+ bboxes = np.array(example['bboxes']).round().astype(int)
61
+ bboxes[:, 2:] += bboxes[:, :2]
62
+ categories = example['categories']
63
+ masks = example['annotation']
64
+ n = len(bboxes)
65
+ for i in range(n):
66
+ c = categories[i]
67
+ color, name = colors[c], names[c]
68
+ cv2.rectangle(image, bboxes[i, :2], bboxes[i, 2:], color.tolist(), 2)
69
+ cv2.putText(
70
+ image, name, bboxes[i, :2], cv2.FONT_HERSHEY_SIMPLEX,
71
+ 1, color.tolist(), 2, cv2.LINE_AA, False
72
+ )
73
+ image[masks[..., i] == 1] = image[masks[..., i] == 1] // 2 + color // 2
74
+ return image
75
+
76
+ >>> dataset.set_transform(transforms)
77
+
78
+ >>> names = dataset['train'].features['categories'].feature.names
79
+
80
+ >>> colors = np.ones((80, 3), np.uint8) * 255
81
+ >>> colors[:, 0] = np.linspace(0, 255, 80)
82
+ >>> colors = cv2.cvtColor(colors[None], cv2.COLOR_HSV2RGB)[0]
83
+
84
+ >>> example = dataset['train'][500]
85
+ >>> Image.fromarray(visualize(example, names, colors))
86
+ ```
87
+
88
+ ## Table of Contents
89
+ - [Table of Contents](#table-of-contents)
90
+ - [Dataset Description](#dataset-description)
91
+ - [Dataset Summary](#dataset-summary)
92
+ - [Supported Tasks and Leaderboards](#supported-tasks-and-leaderboards)
93
+ - [Languages](#languages)
94
+ - [Dataset Structure](#dataset-structure)
95
+ - [Data Instances](#data-instances)
96
+ - [Data Fields](#data-fields)
97
+ - [Data Splits](#data-splits)
98
+ - [Dataset Creation](#dataset-creation)
99
+ - [Curation Rationale](#curation-rationale)
100
+ - [Source Data](#source-data)
101
+ - [Annotations](#annotations)
102
+ - [Personal and Sensitive Information](#personal-and-sensitive-information)
103
+ - [Considerations for Using the Data](#considerations-for-using-the-data)
104
+ - [Social Impact of Dataset](#social-impact-of-dataset)
105
+ - [Discussion of Biases](#discussion-of-biases)
106
+ - [Other Known Limitations](#other-known-limitations)
107
+ - [Additional Information](#additional-information)
108
+ - [Dataset Curators](#dataset-curators)
109
+ - [Licensing Information](#licensing-information)
110
+ - [Citation Information](#citation-information)
111
+ - [Contributions](#contributions)
112
+
113
+ ## Dataset Description
114
+
115
+ - **Homepage:** https://cocodataset.org/
116
+ - **Repository:** None
117
+ - **Paper:** [Microsoft COCO: Common Objects in Context](https://arxiv.org/abs/1405.0312)
118
+ - **Leaderboard:** [Papers with Code](https://paperswithcode.com/dataset/coco)
119
+ - **Point of Contact:** None
120
+
121
+ ### Dataset Summary
122
+
123
+ COCO is a large-scale object detection, segmentation, and captioning dataset.
124
+
125
+ ### Supported Tasks and Leaderboards
126
+
127
+ [Object Detection](https://huggingface.co/tasks/object-detection)
128
+ [Image Segmentation](https://huggingface.co/tasks/image-segmentation)
129
+
130
+ ### Languages
131
+
132
+ en
133
+
134
+ ## Dataset Structure
135
+
136
+ ### Data Instances
137
+
138
+ An example looks as follows.
139
+
140
+ ```
141
+ {
142
+ "image": PIL.Image(mode="RGB"),
143
+     "bboxes": [
+         [192.42, 220.18, 129.23, 148.38],
+         ...
+     ],
+     "categories": [46, 46, 46, 55],
+     "inst.rles": {
+         "size": [[426, 640], ...],
+         "counts": ["gU`2b0d;...", ...]
+     }
150
+ }
151
+ ```
152
+
153
+ ### Data Fields
154
+
155
+ [More Information Needed]
156
+
157
+ ### Data Splits
158
+
159
+ | name | train | validation |
160
+ | ------- | ------: | ---------: |
161
+ | default | 118,287 | 5,000 |
162
+
163
+ ## Dataset Creation
164
+
165
+ ### Curation Rationale
166
+
167
+ [More Information Needed]
168
+
169
+ ### Source Data
170
+
171
+ #### Initial Data Collection and Normalization
172
+
173
+ [More Information Needed]
174
+
175
+ #### Who are the source language producers?
176
+
177
+ [More Information Needed]
178
+
179
+ ### Annotations
180
+
181
+ #### Annotation process
182
+
183
+ [More Information Needed]
184
+
185
+ #### Who are the annotators?
186
+
187
+ [More Information Needed]
188
+
189
+ ### Personal and Sensitive Information
190
+
191
+ [More Information Needed]
192
+
193
+ ## Considerations for Using the Data
194
+
195
+ ### Social Impact of Dataset
196
+
197
+ [More Information Needed]
198
+
199
+ ### Discussion of Biases
200
+
201
+ [More Information Needed]
202
+
203
+ ### Other Known Limitations
204
+
205
+ [More Information Needed]
206
+
207
+ ## Additional Information
208
+
209
+ ### Dataset Curators
210
+
211
+ [More Information Needed]
212
+
213
+ ### Licensing Information
214
+
215
+ Creative Commons Attribution 4.0 License
216
+
217
+ ### Citation Information
218
+
219
+ ```
220
+ @article{cocodataset,
221
+ author = {Tsung{-}Yi Lin and Michael Maire and Serge J. Belongie and Lubomir D. Bourdev and Ross B. Girshick and James Hays and Pietro Perona and Deva Ramanan and Piotr Doll{\'{a}}r and C. Lawrence Zitnick},
222
+ title = {Microsoft {COCO:} Common Objects in Context},
223
+ journal = {CoRR},
224
+ volume = {abs/1405.0312},
225
+ year = {2014},
226
+ url = {http://arxiv.org/abs/1405.0312},
227
+ archivePrefix = {arXiv},
228
+ eprint = {1405.0312},
229
+ timestamp = {Mon, 13 Aug 2018 16:48:13 +0200},
230
+ biburl = {https://dblp.org/rec/bib/journals/corr/LinMBHPRDZ14},
231
+ bibsource = {dblp computer science bibliography, https://dblp.org}
232
+ }
233
+ ```
234
+
235
+ ### Contributions
236
+
237
+ Thanks to [@github-whyen-wang](https://github.com/whyen-wang) for adding this dataset.
coco_detection.py ADDED
@@ -0,0 +1,147 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import json
2
+ import datasets
3
+ from pathlib import Path
4
+
5
+ _HOMEPAGE = 'https://cocodataset.org/'
6
+ _LICENSE = 'Creative Commons Attribution 4.0 License'
7
+ _DESCRIPTION = 'COCO is a large-scale object detection, segmentation, and captioning dataset.'
8
+ _CITATION = '''\
9
+ @article{cocodataset,
10
+ author = {Tsung{-}Yi Lin and Michael Maire and Serge J. Belongie and Lubomir D. Bourdev and Ross B. Girshick and James Hays and Pietro Perona and Deva Ramanan and Piotr Doll{'{a} }r and C. Lawrence Zitnick},
11
+ title = {Microsoft {COCO:} Common Objects in Context},
12
+ journal = {CoRR},
13
+ volume = {abs/1405.0312},
14
+ year = {2014},
15
+ url = {http://arxiv.org/abs/1405.0312},
16
+ archivePrefix = {arXiv},
17
+ eprint = {1405.0312},
18
+ timestamp = {Mon, 13 Aug 2018 16:48:13 +0200},
19
+ biburl = {https://dblp.org/rec/bib/journals/corr/LinMBHPRDZ14},
20
+ bibsource = {dblp computer science bibliography, https://dblp.org}
21
+ }
22
+ '''
23
+ _NAMES = [
24
+ 'person', 'bicycle', 'car', 'motorcycle', 'airplane', 'bus', 'train',
25
+ 'truck', 'boat', 'traffic light', 'fire hydrant', 'stop sign',
26
+ 'parking meter', 'bench', 'bird', 'cat', 'dog', 'horse', 'sheep', 'cow',
27
+ 'elephant', 'bear', 'zebra', 'giraffe', 'backpack', 'umbrella',
28
+ 'handbag', 'tie', 'suitcase', 'frisbee', 'skis', 'snowboard',
29
+ 'sports ball', 'kite', 'baseball bat', 'baseball glove', 'skateboard',
30
+ 'surfboard', 'tennis racket', 'bottle', 'wine glass', 'cup', 'fork',
31
+ 'knife', 'spoon', 'bowl', 'banana', 'apple', 'sandwich', 'orange',
32
+ 'broccoli', 'carrot', 'hot dog', 'pizza', 'donut', 'cake', 'chair',
33
+ 'couch', 'potted plant', 'bed', 'dining table', 'toilet', 'tv',
34
+ 'laptop', 'mouse', 'remote', 'keyboard', 'cell phone', 'microwave',
35
+ 'oven', 'toaster', 'sink', 'refrigerator', 'book', 'clock', 'vase',
36
+ 'scissors', 'teddy bear', 'hair drier', 'toothbrush'
37
+ ]
38
+
39
+
40
class COCODetectionConfig(datasets.BuilderConfig):
    '''Builder Config for coco2017.

    Stores the official COCO image-zip URLs together with the
    repo-local annotation archives for the train/validation splits.
    '''

    def __init__(self, description, homepage, annotation_urls, **kwargs):
        '''Initialize the config.

        Args:
            description: Human-readable dataset description.
            homepage: Dataset homepage URL.
            annotation_urls: Mapping with 'train' and 'validation' keys
                pointing to the annotation zip files.
            **kwargs: Forwarded to ``datasets.BuilderConfig``.
        '''
        super().__init__(version=datasets.Version('1.0.0', ''), **kwargs)
        self.description = description
        self.homepage = homepage
        # Official COCO image hosting base URL.
        base = 'http://images.cocodataset.org/zips/'
        self.train_image_url = base + 'train2017.zip'
        self.val_image_url = base + 'val2017.zip'
        self.train_annotation_urls = annotation_urls['train']
        self.val_annotation_urls = annotation_urls['validation']
59
+
60
class COCODetection(datasets.GeneratorBasedBuilder):
    '''COCO 2017 object-detection / instance-segmentation dataset builder.

    Images come from the official COCO download mirrors; per-image
    annotations (bounding boxes, category ids, RLE instance masks) are
    read from JSON Lines files shipped with this repository.
    '''

    BUILDER_CONFIGS = [
        COCODetectionConfig(
            description=_DESCRIPTION,
            homepage=_HOMEPAGE,
            annotation_urls={
                'train': 'data/instance_train.zip',
                'validation': 'data/instance_validation.zip'
            },
        )
    ]

    def _info(self):
        '''Declare the feature schema and dataset metadata.'''
        features = datasets.Features({
            'image': datasets.Image(mode='RGB', decode=True, id=None),
            # One [x, y, width, height] box per instance (COCO convention).
            'bboxes': datasets.Sequence(
                feature=datasets.Sequence(
                    feature=datasets.Value(dtype='float32', id=None),
                    length=4, id=None
                ), length=-1, id=None
            ),
            'categories': datasets.Sequence(
                feature=datasets.ClassLabel(names=_NAMES),
                length=-1, id=None
            ),
            # COCO-style run-length-encoded instance masks:
            # 'size' is [height, width], 'counts' the RLE string.
            'inst.rles': datasets.Sequence(
                feature={
                    'size': datasets.Sequence(
                        feature=datasets.Value(dtype='int32', id=None),
                        length=2, id=None
                    ),
                    'counts': datasets.Value(dtype='string', id=None)
                },
                length=-1, id=None
            ),
        })
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=features,
            homepage=_HOMEPAGE,
            license=_LICENSE,
            citation=_CITATION
        )

    def _split_generators(self, dl_manager):
        '''Download/extract images and annotations, define both splits.'''
        train_image_path = dl_manager.download_and_extract(
            self.config.train_image_url
        )
        val_image_path = dl_manager.download_and_extract(
            self.config.val_image_url
        )
        train_annotation_paths = dl_manager.download_and_extract(
            self.config.train_annotation_urls
        )
        val_annotation_paths = dl_manager.download_and_extract(
            self.config.val_annotation_urls
        )
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={
                    'image_path': f'{train_image_path}/train2017',
                    'annotation_path': f'{train_annotation_paths}/instance_train.jsonl'
                }
            ),
            datasets.SplitGenerator(
                name=datasets.Split.VALIDATION,
                gen_kwargs={
                    'image_path': f'{val_image_path}/val2017',
                    'annotation_path': f'{val_annotation_paths}/instance_validation.jsonl'
                }
            )
        ]

    def _generate_examples(self, image_path, annotation_path):
        '''Yield (index, example) pairs from a JSON Lines annotation file.

        Args:
            image_path: Directory containing the split's image files.
            annotation_path: Path to the split's .jsonl annotation file.

        Fix: blank lines (e.g. a trailing newline in the jsonl) previously
        crashed ``json.loads``; they are now skipped.
        '''
        idx = 0
        image_root = Path(image_path)
        with open(annotation_path, 'r', encoding='utf-8') as f:
            for line in f:
                line = line.strip()
                if not line:
                    # Tolerate empty / trailing lines in the jsonl file.
                    continue
                obj = json.loads(line)
                yield idx, {
                    'image': str(image_root / obj['image']),
                    'bboxes': obj['bboxes'],
                    'categories': obj['categories'],
                    'inst.rles': obj['inst.rles']
                }
                idx += 1
data/instance_train.zip ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:b696f1c992d995acc5ec20714e54fe739ef4740f1d67ea134f1066707e171d53
3
+ size 140601384
data/instance_validation.zip ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:f636aae29d6bd22ee6bf7c20140976cfc6fb63ab8a68eb614badf5dc888f591c
3
+ size 5973571
prepare.ipynb ADDED
@@ -0,0 +1,160 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "cells": [
3
+ {
4
+ "cell_type": "code",
5
+ "execution_count": 7,
6
+ "metadata": {},
7
+ "outputs": [],
8
+ "source": [
9
+ "import os\n",
10
+ "import zipfile\n",
11
+ "import requests\n",
12
+ "import jsonlines\n",
13
+ "from tqdm import tqdm\n",
14
+ "from pathlib import Path\n",
15
+ "from pycocotools.coco import COCO\n",
16
+ "from pycocotools import mask as maskUtils"
17
+ ]
18
+ },
19
+ {
20
+ "cell_type": "markdown",
21
+ "metadata": {},
22
+ "source": [
23
+ "# Download Annotations"
24
+ ]
25
+ },
26
+ {
27
+ "cell_type": "code",
28
+ "execution_count": null,
29
+ "metadata": {},
30
+ "outputs": [],
31
+ "source": [
32
+ "url = 'http://images.cocodataset.org/annotations/'\n",
33
+ "file = 'annotations_trainval2017.zip'\n",
34
+ "if not Path(f'./{file}').exists():\n",
35
+ " response = requests.get(url + file)\n",
36
+ " with open(file, 'wb') as f:\n",
37
+ " f.write(response.content)\n",
38
+ "\n",
39
+ " with zipfile.ZipFile(file, 'r') as zipf:\n",
40
+ " zipf.extractall(Path())\n"
41
+ ]
42
+ },
43
+ {
44
+ "cell_type": "markdown",
45
+ "metadata": {},
46
+ "source": [
47
+ "# Read Annotations"
48
+ ]
49
+ },
50
+ {
51
+ "cell_type": "code",
52
+ "execution_count": null,
53
+ "metadata": {},
54
+ "outputs": [],
55
+ "source": [
56
+ "coco91_to_coco80 = [\n",
57
+ " None, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, None,\n",
58
+ " 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22,\n",
59
+ " 23, None, 24, 25, None, None, 26, 27, 28, 29, 30,\n",
60
+ " 31, 32, 33, 34, 35, 36, 37, 38, 39, None, 40, 41,\n",
61
+ " 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54,\n",
62
+ " 55, 56, 57, 58, 59, None, 60, None, None, 61, None,\n",
63
+ " 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, None,\n",
64
+ " 73, 74, 75, 76, 77, 78, 79\n",
65
+ "]"
66
+ ]
67
+ },
68
+ {
69
+ "cell_type": "markdown",
70
+ "metadata": {},
71
+ "source": [
72
+ "# Instance Segmentation Task"
73
+ ]
74
+ },
75
+ {
76
+ "cell_type": "code",
77
+ "execution_count": null,
78
+ "metadata": {},
79
+ "outputs": [],
80
+ "source": [
81
+ "train_data = COCO('annotations/instances_train2017.json')\n",
82
+ "val_data = COCO('annotations/instances_val2017.json')"
83
+ ]
84
+ },
85
+ {
86
+ "cell_type": "code",
87
+ "execution_count": null,
88
+ "metadata": {},
89
+ "outputs": [],
90
+ "source": [
91
+ "for split, data in zip(['train', 'validation'], [train_data, val_data]):\n",
92
+ " with jsonlines.open(f'data/instance_{split}.jsonl', mode='w') as writer:\n",
93
+ " for image_id, image_info in tqdm(data.imgs.items()):\n",
94
+ " bboxes, categories, instance_rles = [], [], []\n",
95
+ " anns = data.imgToAnns[image_id]\n",
96
+ " height, width = image_info['height'], image_info['width']\n",
97
+ " for ann in anns:\n",
98
+ " bboxes.append(ann['bbox'])\n",
99
+ " categories.append(coco91_to_coco80[ann['category_id']])\n",
100
+ " segm = ann['segmentation']\n",
101
+ " if isinstance(segm, list):\n",
102
+ " rles = maskUtils.frPyObjects(segm, height, width)\n",
103
+ " rle = maskUtils.merge(rles)\n",
104
+ " rle['counts'] = rle['counts'].decode()\n",
105
+ " elif isinstance(segm['counts'], list):\n",
106
+ " rle = maskUtils.frPyObjects(segm, height, width)\n",
107
+ " rle['counts'] = rle['counts'].decode()\n",
108
+ " else:\n",
109
+ " rle = segm\n",
110
+ " instance_rles.append(rle)\n",
111
+ " writer.write({\n",
112
+ " 'image': image_info['file_name'],\n",
113
+ " 'bboxes': bboxes,\n",
114
+ " 'categories': categories,\n",
115
+ " 'inst.rles': instance_rles\n",
116
+ " })"
117
+ ]
118
+ },
119
+ {
120
+ "cell_type": "code",
121
+ "execution_count": 8,
122
+ "metadata": {},
123
+ "outputs": [],
124
+ "source": [
125
+ "for split in ['train', 'validation']:\n",
126
+ " file_path = f'data/instance_{split}.jsonl'\n",
127
+ " with zipfile.ZipFile(f'data/instance_{split}.zip', 'w', zipfile.ZIP_DEFLATED) as zipf:\n",
128
+ " zipf.write(file_path, os.path.basename(file_path))"
129
+ ]
130
+ },
131
+ {
132
+ "cell_type": "code",
133
+ "execution_count": null,
134
+ "metadata": {},
135
+ "outputs": [],
136
+ "source": []
137
+ }
138
+ ],
139
+ "metadata": {
140
+ "kernelspec": {
141
+ "display_name": ".venv",
142
+ "language": "python",
143
+ "name": "python3"
144
+ },
145
+ "language_info": {
146
+ "codemirror_mode": {
147
+ "name": "ipython",
148
+ "version": 3
149
+ },
150
+ "file_extension": ".py",
151
+ "mimetype": "text/x-python",
152
+ "name": "python",
153
+ "nbconvert_exporter": "python",
154
+ "pygments_lexer": "ipython3",
155
+ "version": "3.12.2"
156
+ }
157
+ },
158
+ "nbformat": 4,
159
+ "nbformat_minor": 2
160
+ }