# Roman-Urdu-Parl-split/scripts/check_substring.py
# Check whether any training sentences are made up entirely of test sentences (or repetitions of them).
# Observation: some sentences in the original dataset are composed of other sentences in the dataset.
# For example, one sentence might be "A B C D" and another might be "A B C D A B C D".
# This is problematic: the model can easily overfit on such training data, and
# if the sentence being repeated is in the test set, the model will score well on the
# test set while performing worse on real-world data.
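#
# Approach (sketch): build an Aho-Corasick automaton over the unique test
# sentences, scan every unique training sentence for occurrences, and flag a
# training sentence when the occurrences of a single test sentence tile it
# end to end (allowing a single space between consecutive repetitions).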
import pandas as pd
import ahocorasick
from tqdm import tqdm
import json
# File paths
training_set_path = '../train_set.csv'
test_set_path = '../small_validation_set.csv'
print("Loading the CSV files...")
# Load the CSV files into Pandas DataFrames
test_set = pd.read_csv(test_set_path, encoding='utf-8')
training_set = pd.read_csv(training_set_path, encoding='utf-8')
print("CSV files loaded successfully.")
# Prepare the test sentences
test_sentences = test_set['Urdu text'].dropna()
# Remove extra spaces and standardize
test_sentences = test_sentences.apply(lambda x: ' '.join(x.strip().split()))
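# e.g. (hypothetical input) "  aap   kaise hain " -> "aap kaise hain"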
# Skip sentences with only one word if desired
test_sentences = test_sentences[test_sentences.str.split().str.len() > 1].unique()
test_sentences_set = set(test_sentences)
print(f"Number of test sentences: {len(test_sentences_set)}")
# Build the Aho-Corasick automaton
print("Building the Aho-Corasick automaton with test sentences...")
A = ahocorasick.Automaton()
for idx, test_sentence in enumerate(test_sentences):
    A.add_word(test_sentence, (idx, test_sentence))
A.make_automaton()
print("Automaton built successfully.")
# Initialize matches dictionary
matches = {}
print("Processing training sentences...")
training_sentences = training_set['Urdu text'].dropna()
# Remove extra spaces and standardize
training_sentences = training_sentences.apply(lambda x: ' '.join(x.strip().split()))
training_sentences = training_sentences.unique()
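# For each training sentence, collect all test-sentence occurrences as
# (start_index, end_index, test_sentence) tuples, then check whether the
# occurrences of a single test sentence tile the sentence end to end.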
for training_sentence in tqdm(training_sentences):
    s = training_sentence
    s_length = len(s)
    matches_in_s = []
    for end_index, (insert_order, test_sentence) in A.iter(s):
        start_index = end_index - len(test_sentence) + 1
        matches_in_s.append((start_index, end_index, test_sentence))
    if not matches_in_s:
        continue
    # Sort matches by start_index
    matches_in_s.sort(key=lambda x: x[0])
    # Now check whether the matches cover the entire training sentence without gaps
    # and all matches are occurrences of the same test sentence
    covers_entire_sentence = True
    current_index = 0
    first_test_sentence = matches_in_s[0][2]
    all_same_test_sentence = True
    for start_index, end_index, test_sentence in matches_in_s:
        # Repetitions in the dataset are space-joined, so tolerate exactly one
        # space between consecutive matches
        if start_index == current_index + 1 and s[current_index] == ' ':
            current_index += 1
        if start_index != current_index:
            covers_entire_sentence = False
            break
        if test_sentence != first_test_sentence:
            all_same_test_sentence = False
            break
        current_index = end_index + 1
    if covers_entire_sentence and current_index == s_length and all_same_test_sentence:
        # Training sentence is made up entirely of repetitions of first_test_sentence
        if first_test_sentence not in matches:
            matches[first_test_sentence] = []
        matches[first_test_sentence].append(s)
print("Processing completed.")
print("Number of matches: ", sum(len(v) for v in matches.values()))
# Optionally, save matches to a JSON file
output_file_path = '/netscratch/butt/Transliterate/RUP/finetuning/scripts/one_time_usage/test_training_matches.json'
with open(output_file_path, 'w', encoding='utf-8') as json_file:
    json.dump(matches, json_file, ensure_ascii=False, indent=4)
print(f"Matches have been written to {output_file_path}")