# split.py: build train/dev/test splits for the MIM-GOLD-NER corpus
#
# want data from all documents
# want data from all classes
#
import pprint
import random
from collections import defaultdict

file_names = [
"adjudications.txt",
"blog.txt",
"books.txt",
"emails.txt",
"fbl.txt",
"laws.txt",
"mbl.txt",
"radio_tv_news.txt",
"school_essays.txt",
"scienceweb.txt",
"webmedia.txt",
"websites.txt",
"written-to-be-spoken.txt"
]
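
# Each file is assumed to be CoNLL-style two-column text: one "token tag"
# pair per line, with blank lines separating sentences.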
def read_file(file_name):
    data = []
    sentence = []
    with open(file_name, encoding="utf-8") as fh:
        for line in fh:
            if not line.strip() and sentence:
                data.append(sentence)
                sentence = []
                continue
            parts = line.strip().split()
            if len(parts) >= 2:
                w, t = parts[0], parts[1]  # take the first two columns only
                sentence.append((w, t))
    if sentence:
        # Keep the final sentence when the file lacks a trailing blank line.
        data.append(sentence)
    return data
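
# read_file returns a list of sentences, each a list of (token, tag) pairs,
# e.g. [[("Jón", "B-Person"), ("skrifar", "O")], ...] (tags illustrative).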
def calc_stats(data):
stats = defaultdict(int)
for sent in data:
stats["n_sentences"] += 1
for token, label in sent:
stats[label] += 1
return stats
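
# A stats dict looks like {"n_sentences": 1200, "O": 25000, "B-Person": 310, ...}
# (numbers illustrative). get_total_stats sums the per-file dicts into
# corpus-wide totals.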
def get_total_stats():
total_stats = defaultdict(int)
for file_name in file_names:
d = read_file("data/"+file_name)
stats = calc_stats(d)
#print(f"--- [{file_name}]---")
#pprint.pprint(stats)
for k, v in stats.items():
total_stats[k] += v
#print("---- TOTAL ---- ")
#pprint.pprint(total_stats)
return total_stats
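
# Seed the RNG so the shuffles, and therefore the splits, are reproducible.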
random.seed(1)
def check_if_not_done(stats, full_stats, target):
    # True while any label (or the sentence count) in `stats` is still below
    # its `target` share of `full_stats`.
    for k, v in full_stats.items():
        if v * target > stats[k]:
            return True
    return False
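
# Greedy split strategy: for each source file, shuffle its sentences, fill the
# test split until every label has reached its target share, then fill dev the
# same way; everything else falls through to train. The "- 5" slack below
# keeps a split from overshooting its per-label targets.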
def create_splits(train=0.8, test=0.1, dev=0.1):
    # `train` is implicit: any sentence not routed to test or dev stays in
    # the training set.
    train_data = []
    test_data = []
    dev_data = []
for file_name in file_names:
train_stats = defaultdict(int)
test_stats = defaultdict(int)
dev_stats = defaultdict(int)
d = read_file("data/"+file_name)
stats = calc_stats(d)
random.shuffle(d)
file_train = []
file_test = []
file_dev = []
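        # Walk the shuffled sentences, greedily filling test first, then dev.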
for sent in d:
if check_if_not_done(test_stats, stats, test):
                # TEST data: take this sentence if any of its entity labels,
                # or the sentence count, is still more than 5 short of target.
use = False
for token in sent:
w, tag = token
if tag == 'O':
continue
if test_stats[tag] < test * stats[tag] - 5:
use = True
if test_stats['n_sentences'] < test * stats['n_sentences'] - 5:
use = True
if use:
file_test.append(sent)
test_stats['n_sentences'] += 1
for w, t in sent:
test_stats[t] += 1
elif check_if_not_done(dev_stats, stats, dev):
                # DEV data: same rule as for test, against the dev targets.
use = False
for token in sent:
w, tag = token
if tag == 'O':
continue
if dev_stats[tag] < dev * stats[tag] - 5:
use = True
if dev_stats['n_sentences'] < dev * stats['n_sentences'] - 5:
use = True
if use:
file_dev.append(sent)
dev_stats['n_sentences'] += 1
for w, t in sent:
dev_stats[t] += 1
else:
file_train.append(sent)
train_stats['n_sentences'] += 1
for w, t in sent:
train_stats[t] += 1
else:
file_train.append(sent)
train_stats['n_sentences'] += 1
for w, t in sent:
train_stats[t] += 1
        # Every sentence of the file must land in exactly one split.
        assert len(d) == len(file_train) + len(file_dev) + len(file_test), \
            f"split sizes do not add up for {file_name}"
train_data += file_train
test_data += file_test
dev_data += file_dev
return train_data, test_data, dev_data
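
# Build the splits and print per-label counts for each split next to the
# corpus totals, so the 80/10/10 balance can be eyeballed.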
train, test, dev = create_splits()

total_stats = get_total_stats()
print("---- total ----")
pprint.pprint(total_stats)
print("---- test ----")
pprint.pprint(calc_stats(test))
print("---- dev ----")
pprint.pprint(calc_stats(dev))
print("---- train ----")
pprint.pprint(calc_stats(train))
with open("train.txt", "w") as outf:
for sent in train:
for w, t in sent:
outf.writelines(f"{w} {t}\n")
outf.writelines("\n")
with open("test.txt", "w") as outf:
for sent in test:
for w, t in sent:
outf.writelines(f"{w} {t}\n")
outf.writelines("\n")
with open("dev.txt", "w") as outf:
for sent in dev:
for w, t in sent:
outf.writelines(f"{w} {t}\n")
outf.writelines("\n")
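
# Optional sanity check: test and dev should each end up with roughly 10% of
# all sentences, give or take the slack the greedy filler allows.
n_total = len(train) + len(test) + len(dev)
print(f"train {len(train) / n_total:.1%}, "
      f"test {len(test) / n_total:.1%}, "
      f"dev {len(dev) / n_total:.1%}")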