Datasets:

Modalities: Text
Formats: json
Languages: English
Size: < 1K
Libraries: Datasets, pandas
License:
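Given the json format and the Datasets / pandas libraries listed above, the file produced by the script below can presumably be loaded as follows. This is a minimal sketch, assuming the youtube_new.json output written by the script; the records of interest sit under the nested "data" field.

import json
import pandas as pd
from datasets import load_dataset

# Option 1: Hugging Face Datasets -- point the JSON loader at the nested "data" field
ds = load_dataset("json", data_files="youtube_new.json", field="data")
print(ds["train"][0])

# Option 2: plain pandas -- flatten the "data" records into a DataFrame
with open("youtube_new.json") as f:
    df = pd.json_normalize(json.load(f)["data"])
print(df.head())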
import subprocess
import csv
import json
from PIL import Image
import os
import pandas as pd

def check_image(image_id, image_directory):
    """
    Check if an image with a given ID is properly downloaded.
    :param image_id: The ID of the image.
    :param image_directory: Directory where the image is stored.
    :return: True if the image is downloaded and valid, False otherwise.
    """
    # Constructing the file path (assuming images are in .jpg format)
    file_path = os.path.join(image_directory, f"{image_id}.jpg")

    # Check if the file exists
    if not os.path.exists(file_path):
        print(f"Image {image_id} not found.")
        return False

    # Try opening the image
    try:
        with Image.open(file_path) as img:
            img.verify()  # Verify if it's a correct image
        # print(f"Image {image_id} is valid.")
        return True
    except (IOError, SyntaxError) as e:
        print(f"Image {image_id} is corrupted: {e}")
        return False

# Fetch the images before validating them (expects download_img.sh in the working directory)
subprocess.run(['bash', 'download_img.sh'])

csv_file_name = "youtube_new.csv"
json_file_name = "youtube_new.json"
json_structure = {
    "dataset_type": "test",
    "dataset_name": "youtube",
    "dataset_version": "0.0.2",
    "data": []
}

# Quick sanity check: count the rows (questions) and unique videos in the CSV
check_df = pd.read_csv(csv_file_name)
check_df_cleaned = check_df.drop_duplicates(subset=['video_id'])
print(f"Creating {check_df.shape[0]} questions from {check_df_cleaned.shape[0]} unique images...")

# Build the JSON records, keeping only rows whose image downloaded correctly
with open(csv_file_name, 'r') as csv_file:
    csv_reader = csv.DictReader(csv_file)
    for row in csv_reader:
        if check_image(row['video_id'], 'images_new'):
            # Comma-separated fields become lists in the JSON output
            row['video_classes'] = row['video_classes'].split(',')
            row['answers'] = row['answers'].split(',')
            json_structure["data"].append(row)
with open(json_file_name, 'w') as json_file:
    json.dump(json_structure, json_file, indent=4)
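For reference, youtube_new.json ends up shaped roughly as below. The record is illustrative: it shows only the fields the script explicitly transforms (video_id, video_classes, answers); any other columns present in youtube_new.csv are carried through unchanged as strings.

{
    "dataset_type": "test",
    "dataset_name": "youtube",
    "dataset_version": "0.0.2",
    "data": [
        {
            "video_id": "<video_id>",
            "video_classes": ["<class_1>", "<class_2>"],
            "answers": ["<answer_1>", "<answer_2>"]
        }
    ]
}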