from datasets import load_dataset, DatasetDict
# Load your dataset from a local JSONL file
dataset = load_dataset("json", data_files="listings.jsonl")
dataset = dataset.shuffle(seed=42)
# Split the dataset into train (70%) and a temporary split (30%)
split_dataset = dataset["train"].train_test_split(test_size=0.3)
train_dataset = split_dataset["train"]
temp_dataset = split_dataset["test"]
# Split the temporary split evenly into validation and test (15% of the full dataset each)
split_temp_dataset = temp_dataset.train_test_split(test_size=0.5)
validation_dataset = split_temp_dataset["train"]
test_dataset = split_temp_dataset["test"]
# Combine all splits into a DatasetDict
final_dataset = DatasetDict(
    {"train": train_dataset, "validation": validation_dataset, "test": test_dataset}
)
# Push the split datasets to Hugging Face Hub
final_dataset.push_to_hub("fb-housing-posts")
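Once pushed, the three splits can be pulled back down by repository id. A minimal sketch, assuming the dataset was uploaded under your own Hugging Face account ("your-username" is a placeholder to replace):

from datasets import load_dataset

# Reload the train/validation/test splits from the Hub.
# "your-username" is a placeholder for the namespace the dataset was pushed to.
reloaded = load_dataset("your-username/fb-housing-posts")
print(reloaded["train"].num_rows, reloaded["validation"].num_rows, reloaded["test"].num_rows)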