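"""Find overlap between the validation ("test") sentences and the training set.

Builds an Aho-Corasick automaton over the normalised validation
sentences and scans every training sentence with it. A training
sentence counts as a match when it is tiled exactly by back-to-back
occurrences of a single validation sentence (in the common case, when
it is an exact duplicate of one). The resulting mapping from validation
sentence to matching training sentences is written out as JSON.
"""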
import pandas as pd
import ahocorasick
from tqdm import tqdm
import json
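
# Input paths: the training set and the small validation set whose
# sentences are searched for inside the training data.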
training_set_path = '../train_set.csv'
test_set_path = '../small_validation_set.csv'

print("Loading the CSV files...")

test_set = pd.read_csv(test_set_path, encoding='utf-8')
training_set = pd.read_csv(training_set_path, encoding='utf-8')

print("CSV files loaded successfully.")
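
# Normalise the test sentences: drop missing values and collapse every
# whitespace run to a single space so matching is insensitive to
# formatting. Single-word sentences are discarded because they would
# match as substrings almost everywhere, then duplicates are removed.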
test_sentences = test_set['Urdu text'].dropna()
test_sentences = test_sentences.apply(lambda x: ' '.join(x.strip().split()))
test_sentences = test_sentences[test_sentences.str.split().str.len() > 1].unique()
test_sentences_set = set(test_sentences)

print(f"Number of test sentences: {len(test_sentences_set)}")
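
# Build an Aho-Corasick automaton over the test sentences so that all
# occurrences of any of them inside a training sentence can be found
# in one linear scan.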
print("Building the Aho-Corasick automaton with test sentences...") |
|
A = ahocorasick.Automaton() |
|
|
|
for idx, test_sentence in enumerate(test_sentences): |
|
A.add_word(test_sentence, (idx, test_sentence)) |
|
|
|
A.make_automaton() |
|
print("Automaton built successfully.") |
|
|
|
|
|
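
# Maps each test sentence to the training sentences it fully covers.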
matches = {}

print("Processing training sentences...")
training_sentences = training_set['Urdu text'].dropna()
training_sentences = training_sentences.apply(lambda x: ' '.join(x.strip().split()))
training_sentences = training_sentences.unique()
for training_sentence in tqdm(training_sentences):
    s = training_sentence
    s_length = len(s)

    # Collect every occurrence of any test sentence inside s. A.iter()
    # yields (end_index, value) pairs; recover the start index from the
    # length of the matched test sentence.
    matches_in_s = []
    for end_index, (insert_order, test_sentence) in A.iter(s):
        start_index = end_index - len(test_sentence) + 1
        matches_in_s.append((start_index, end_index, test_sentence))
    if not matches_in_s:
        continue

    matches_in_s.sort(key=lambda x: x[0])

    # Accept s only if the matches tile it exactly: back-to-back,
    # gap-free occurrences of one and the same test sentence that
    # together cover the whole training sentence (in the common case,
    # a single occurrence, i.e. an exact duplicate).
    covers_entire_sentence = True
    current_index = 0
    first_test_sentence = matches_in_s[0][2]
    all_same_test_sentence = True
    for start_index, end_index, test_sentence in matches_in_s:
        if start_index != current_index:
            covers_entire_sentence = False
            break
        if test_sentence != first_test_sentence:
            all_same_test_sentence = False
            break
        current_index = end_index + 1

    if covers_entire_sentence and current_index == s_length and all_same_test_sentence:
        # Key on first_test_sentence rather than the loop variable so the
        # lookup does not depend on where the loop above stopped.
        if first_test_sentence not in matches:
            matches[first_test_sentence] = []
        matches[first_test_sentence].append(s)
print("Processing completed.") |
|
print("Number of matches: ", sum(len(v) for v in matches.values())) |
|
|
|
|
|
output_file_path = '/netscratch/butt/Transliterate/RUP/finetuning/scripts/one_time_usage/test_training_matches.json'
with open(output_file_path, 'w', encoding='utf-8') as json_file:
    json.dump(matches, json_file, ensure_ascii=False, indent=4)

print(f"Matches have been written to {output_file_path}")