# bash /home/butt/run_docker_cpu.sh python splitting_rup_data.py
import os
import pandas as pd
# File path to the input CSV file
input_file_path = '../original_data/data.csv'
# Output file paths for training, test, and validation sets
base_path = "."
train_output_path = os.path.join(base_path, 'train_set.csv')
test_output_path = os.path.join(base_path, 'test_set.csv')
validation_output_path = os.path.join(base_path, 'validation_set.csv')
small_test_output_path = os.path.join(base_path, 'small_test_set.csv')
small_validation_output_path = os.path.join(base_path, 'small_validation_set.csv')
NUMBER_OF_UNIQUE_SENTENCES = 1500      # sentences that occur exactly once in the corpus
NUMBER_OF_REPLICATED_SENTENCES = 3000  # sentence groups with 2 to REPLICATION_RATE variants
REPLICATION_RATE = 10                  # maximum occurrence count for a "replicated" group
# Load the CSV file into a Pandas DataFrame
df = pd.read_csv(input_file_path, encoding='utf-8')
# Drop rows where 'Urdu text' is NaN
df = df.dropna(subset=['Urdu text'])
# Group by 'Urdu text' and collect the corresponding 'Roman-Urdu text' variants into a list
grouped = df.groupby('Urdu text')['Roman-Urdu text'].apply(list).reset_index()
# Add a 'count' column to store the number of occurrences
grouped['count'] = grouped['Roman-Urdu text'].apply(len)
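# After aggregation, `grouped` has one row per distinct Urdu sentence, e.g.
# (illustrative values only, not taken from the real corpus):
#   Urdu text      Roman-Urdu text              count
#   <sentence A>   ['jumla aik']                1
#   <sentence B>   ['jumla do', 'jumlah do']    2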
# Randomly sample NUMBER_OF_UNIQUE_SENTENCES sentences that occur exactly once (no replication in the dataset) for the validation set
unique_sentences_val = grouped[grouped['count'] == 1].sample(n=NUMBER_OF_UNIQUE_SENTENCES, random_state=42)
unique_sentences_val = unique_sentences_val.explode('Roman-Urdu text') # Convert list to individual rows
# Sample NUMBER_OF_UNIQUE_SENTENCES single-occurrence sentences for the test set, excluding any already chosen for validation (unique_sentences_val)
unique_sentences_test = grouped[grouped['count'] == 1]
unique_sentences_test = unique_sentences_test[~unique_sentences_test['Urdu text'].isin(unique_sentences_val['Urdu text'])]
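# Sanity check (optional): enough single-occurrence sentences must remain after excluding the validation picks
assert len(unique_sentences_test) >= NUMBER_OF_UNIQUE_SENTENCES, "not enough unique sentences left for the test set"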
unique_sentences_test = unique_sentences_test.sample(n=NUMBER_OF_UNIQUE_SENTENCES, random_state=42)
unique_sentences_test = unique_sentences_test.explode('Roman-Urdu text') # Convert list to individual rows
# The `replicated` variables feed the full test/val sets; the `one_replicated` variables feed the small test/val sets
# Sample NUMBER_OF_REPLICATED_SENTENCES groups whose sentences appear between 2 and REPLICATION_RATE times
replicated_sentences_val = grouped[(grouped['count'] <= REPLICATION_RATE) & (grouped['count'] > 1)].sample(n=NUMBER_OF_REPLICATED_SENTENCES, random_state=42)
# Do the same for the test set, excluding any groups already in the validation set
replicated_sentences_test = grouped[(grouped['count'] <= REPLICATION_RATE) & (grouped['count'] > 1)]
replicated_sentences_test = replicated_sentences_test[~replicated_sentences_test['Urdu text'].isin(replicated_sentences_val['Urdu text'])]
replicated_sentences_test = replicated_sentences_test.sample(n=NUMBER_OF_REPLICATED_SENTENCES, random_state=42)
# Explode the replicated groups so each Roman-Urdu variant gets its own row
replicated_sentences_val = replicated_sentences_val.explode('Roman-Urdu text')
replicated_sentences_test = replicated_sentences_test.explode('Roman-Urdu text')
# Keep one Roman-Urdu variant per Urdu sentence for the small test/val sets.
# This must happen after the explode: before it, each group is a single
# list-valued row, so sampling one row per group would change nothing.
one_replicated_sentences_val = replicated_sentences_val.groupby('Urdu text').apply(lambda x: x.sample(1, random_state=42)).reset_index(drop=True)
one_replicated_sentences_test = replicated_sentences_test.groupby('Urdu text').apply(lambda x: x.sample(1, random_state=42)).reset_index(drop=True)
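# Sanity check (optional): one variant per group means exactly NUMBER_OF_REPLICATED_SENTENCES rows
assert len(one_replicated_sentences_val) == NUMBER_OF_REPLICATED_SENTENCES
assert len(one_replicated_sentences_test) == NUMBER_OF_REPLICATED_SENTENCES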
# Prepare the test and validation sets
test_set = pd.concat([unique_sentences_test, replicated_sentences_test]).reset_index(drop=True)
validation_set = pd.concat([unique_sentences_val, replicated_sentences_val]).reset_index(drop=True)
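# Sanity check (optional): no Urdu sentence may appear in both held-out sets
overlap = set(test_set['Urdu text']) & set(validation_set['Urdu text'])
assert not overlap, f"{len(overlap)} sentences appear in both test and validation sets"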
# Create the smaller test and validation sets.
# The unique sets already hold exactly NUMBER_OF_UNIQUE_SENTENCES exploded rows each,
# and the one_replicated sets hold exactly NUMBER_OF_REPLICATED_SENTENCES rows each
# (one variant per sentence), so no further sampling or exploding is needed.
small_unique_sentences_test = unique_sentences_test
small_unique_sentences_val = unique_sentences_val
small_replicated_sentences_test = one_replicated_sentences_test
small_replicated_sentences_val = one_replicated_sentences_val
# combine the small sets
small_test_set = pd.concat([small_unique_sentences_test, small_replicated_sentences_test]).reset_index(drop=True)
small_validation_set = pd.concat([small_unique_sentences_val, small_replicated_sentences_val]).reset_index(drop=True)
# Prepare the training set: everything in the original DataFrame except the
# Urdu sentences used in the test and validation sets
training_set = df[~df['Urdu text'].isin(test_set['Urdu text']) & ~df['Urdu text'].isin(validation_set['Urdu text'])]
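# Sanity check (optional): the training set shares no Urdu sentence with either held-out set
assert not set(training_set['Urdu text']) & set(test_set['Urdu text'])
assert not set(training_set['Urdu text']) & set(validation_set['Urdu text'])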
# Save only 'Urdu text' and 'Roman-Urdu text' columns to CSV files
training_set[['Urdu text', 'Roman-Urdu text']].to_csv(train_output_path, index=False, encoding='utf-8')
test_set[['Urdu text', 'Roman-Urdu text']].to_csv(test_output_path, index=False, encoding='utf-8')
validation_set[['Urdu text', 'Roman-Urdu text']].to_csv(validation_output_path, index=False, encoding='utf-8')
small_test_set[['Urdu text', 'Roman-Urdu text']].to_csv(small_test_output_path, index=False, encoding='utf-8')
small_validation_set[['Urdu text', 'Roman-Urdu text']].to_csv(small_validation_output_path, index=False, encoding='utf-8')
print(f"Training, test, validation, and smaller subsets have been saved to respective CSV files.")
# Print the number of rows in each file
print(f"Number of rows in training set: {len(training_set)}")
print(f"Number of rows in test set: {len(test_set)}")
print(f"Number of rows in validation set: {len(validation_set)}")
print(f"Number of rows in small test set: {len(small_test_set)}")
print(f"Number of rows in small validation set: {len(small_validation_set)}")