|
|
|
|
|
import os

import numpy as np
import pandas as pd
|
|
|
|
|
# Location of the raw parallel corpus (Urdu / Roman-Urdu sentence pairs).
input_file_path = '../original_data/data.csv'

# Directory that receives every generated split file.
base_path = "."

# BUG FIX: the original code concatenated base_path + filename directly,
# which with base_path == "." produced hidden files such as ".train_set.csv".
# os.path.join inserts the path separator correctly on every platform.
train_output_path = os.path.join(base_path, 'train_set.csv')
test_output_path = os.path.join(base_path, 'test_set.csv')
validation_output_path = os.path.join(base_path, 'validation_set.csv')
small_test_output_path = os.path.join(base_path, 'small_test_set.csv')
small_validation_output_path = os.path.join(base_path, 'small_validation_set.csv')

# Split sizing: how many single-transliteration Urdu sentences go into each
# evaluation split, how many multi-transliteration sentences, and the maximum
# number of transliterations a sentence may have to count as "replicated".
NUMBER_OF_UNIQUE_SENTENCES = 1500
NUMBER_OF_REPLICATED_SENTENCES = 3000
REPLICATION_RATE = 10
|
|
|
|
|
# Load the parallel corpus and keep only rows that actually have an Urdu side.
df = pd.read_csv(input_file_path, encoding='utf-8')
df = df.dropna(subset=['Urdu text'])

# Collapse the corpus to one row per distinct Urdu sentence, collecting all of
# its Roman-Urdu transliterations into a single list-valued cell, then record
# how many transliterations each sentence has.
grouped = (
    df.groupby('Urdu text')['Roman-Urdu text']
    .agg(list)
    .reset_index()
)
grouped['count'] = grouped['Roman-Urdu text'].str.len()
|
|
|
|
|
# Urdu sentences that have exactly one Roman-Urdu transliteration.
singletons = grouped[grouped['count'] == 1]

# Validation draw: a fixed-seed sample of singleton sentences. explode turns
# each one-element list into its scalar transliteration.
unique_sentences_val = singletons.sample(n=NUMBER_OF_UNIQUE_SENTENCES, random_state=42)
unique_sentences_val = unique_sentences_val.explode('Roman-Urdu text')

# Test draw: sample only from the singletons not already taken by validation,
# so the two splits never share an Urdu sentence.
leftover_singletons = singletons[~singletons['Urdu text'].isin(unique_sentences_val['Urdu text'])]
unique_sentences_test = leftover_singletons.sample(n=NUMBER_OF_UNIQUE_SENTENCES, random_state=42)
unique_sentences_test = unique_sentences_test.explode('Roman-Urdu text')
|
|
|
|
|
|
|
|
|
# Urdu sentences with between 2 and REPLICATION_RATE transliterations.
multi_mask = (grouped['count'] > 1) & (grouped['count'] <= REPLICATION_RATE)
multi = grouped[multi_mask]

# Validation draw from the multi-transliteration pool (fixed seed).
replicated_sentences_val = multi.sample(n=NUMBER_OF_REPLICATED_SENTENCES, random_state=42)

# Test draw from the remainder, so no Urdu sentence appears in both splits.
replicated_sentences_test = multi[~multi['Urdu text'].isin(replicated_sentences_val['Urdu text'])]
replicated_sentences_test = replicated_sentences_test.sample(n=NUMBER_OF_REPLICATED_SENTENCES, random_state=42)
|
|
|
|
|
# NOTE(review): `grouped` holds one row per distinct Urdu sentence, so each
# group below contains exactly one row and sample(1, random_state=42) simply
# returns it — this groupby/apply is effectively an identity pass over the
# frame. Also, the one_replicated_* frames are not referenced again anywhere
# visible in this script; presumably kept for a one-variant-per-sentence
# experiment — confirm before removing.
one_replicated_sentences_val = replicated_sentences_val.groupby('Urdu text').apply(lambda x: x.sample(1, random_state=42)).reset_index(drop=True)

one_replicated_sentences_test = replicated_sentences_test.groupby('Urdu text').apply(lambda x: x.sample(1, random_state=42)).reset_index(drop=True)


# Expand the list-valued 'Roman-Urdu text' column so that every
# transliteration becomes its own row; the replicated_sentences_* frames feed
# the test/validation concatenation further down.
replicated_sentences_val = replicated_sentences_val.explode('Roman-Urdu text')
one_replicated_sentences_val = one_replicated_sentences_val.explode('Roman-Urdu text')

replicated_sentences_test = replicated_sentences_test.explode('Roman-Urdu text')
one_replicated_sentences_test = one_replicated_sentences_test.explode('Roman-Urdu text')
|
|
|
|
|
# Full evaluation splits: one row per (Urdu, Roman-Urdu) pair, with a clean
# 0..n-1 index (ignore_index is equivalent to reset_index(drop=True) here).
test_set = pd.concat([unique_sentences_test, replicated_sentences_test], ignore_index=True)
validation_set = pd.concat([unique_sentences_val, replicated_sentences_val], ignore_index=True)
|
|
|
|
|
|
|
# Smaller evaluation subsets, drawn with the same fixed seed. The unique
# pools already hold exactly NUMBER_OF_UNIQUE_SENTENCES rows each (they were
# sampled to that size above and each row carried a single transliteration),
# so those two draws are pure reshuffles; the replicated pools are larger
# after exploding, so those draws genuinely subset.
small_unique_sentences_val = unique_sentences_val.sample(n=NUMBER_OF_UNIQUE_SENTENCES, random_state=42)
small_unique_sentences_test = unique_sentences_test.sample(n=NUMBER_OF_UNIQUE_SENTENCES, random_state=42)

small_replicated_sentences_val = replicated_sentences_val.sample(n=NUMBER_OF_REPLICATED_SENTENCES, random_state=42)
small_replicated_sentences_test = replicated_sentences_test.sample(n=NUMBER_OF_REPLICATED_SENTENCES, random_state=42)
|
|
|
|
|
|
|
# These frames were sampled from already-exploded data, so the 'Roman-Urdu
# text' column is scalar here and explode should leave each frame unchanged;
# the calls are retained to preserve the original behavior exactly.
small_unique_sentences_val = small_unique_sentences_val.explode('Roman-Urdu text')
small_unique_sentences_test = small_unique_sentences_test.explode('Roman-Urdu text')
small_replicated_sentences_val = small_replicated_sentences_val.explode('Roman-Urdu text')
small_replicated_sentences_test = small_replicated_sentences_test.explode('Roman-Urdu text')

# Small evaluation splits with a clean 0..n-1 index.
small_test_set = pd.concat([small_unique_sentences_test, small_replicated_sentences_test], ignore_index=True)
small_validation_set = pd.concat([small_unique_sentences_val, small_replicated_sentences_val], ignore_index=True)
|
|
|
|
|
|
|
# Training data: every original row whose Urdu sentence is held out by
# neither evaluation split (isin against the union of both held-out columns
# is equivalent to the conjunction of two negated isin masks).
held_out_urdu = pd.concat([test_set['Urdu text'], validation_set['Urdu text']])
training_set = df[~df['Urdu text'].isin(held_out_urdu)]
|
|
|
|
|
|
|
# Persist each split with just the two text columns, UTF-8 encoded and
# without the index column.
output_columns = ['Urdu text', 'Roman-Urdu text']
split_outputs = [
    (training_set, train_output_path),
    (test_set, test_output_path),
    (validation_set, validation_output_path),
    (small_test_set, small_test_output_path),
    (small_validation_set, small_validation_output_path),
]
for split_frame, output_path in split_outputs:
    split_frame[output_columns].to_csv(output_path, index=False, encoding='utf-8')
|
|
|
# Report completion and the size of every generated split.
# FIX: the completion message had an f-string prefix with no placeholders
# (ruff F541); the printed text itself is unchanged.
print("Training, test, validation, and smaller subsets have been saved to respective CSV files.")

print(f"Number of rows in training set: {len(training_set)}")
print(f"Number of rows in test set: {len(test_set)}")
print(f"Number of rows in validation set: {len(validation_set)}")
print(f"Number of rows in small test set: {len(small_test_set)}")
print(f"Number of rows in small validation set: {len(small_validation_set)}")