"""
This script converts the data from the raw data to CSV files.
Usage:
make newsSpace
python convert.py
"""
import csv
import html
import os
import sys
import pandas as pd
from bs4 import BeautifulSoup
from sklearn.model_selection import train_test_split

HEADER = [
    "source",
    "url",
    "title",
    "image",
    "category",
    "description",
    "rank",
    "pubdate",
]
OUTPUT_FILE = "ag_news.csv"
TRAIN_OUTPUT_FILE = "train.csv"
TEST_OUTPUT_FILE = "test.csv"


def _clean_text(text):
    # Unescape backslash-escaped newlines and HTML entities; "\N" marks a
    # NULL field in the raw dump and is mapped to an empty string.
    text = text.replace("\\\n", "\n")
    text = html.unescape(text)
    if text == "\\N":
        return ""
    return text


def _clean_html(text):
    html_code = _clean_text(text)
    # Convert paragraph and line-break tags into newlines so they survive
    # get_text() below.
    html_code = html_code.replace("</p>", "\n\n</p>")
    html_code = html_code.replace("<br>", "\n")
    soup = BeautifulSoup(html_code, "html.parser")
    text = soup.get_text(separator=" ")
    text = text.replace(" \n", "\n").replace("\n ", "\n")
    # strip extra whitespace at the start and end of each line
    lines = [line.strip() for line in text.split("\n")]
    return "\n".join(lines)


def _clean_image(image):
    if image == "none":
        return None
    return image


def _clean_rank(rank):
    return int(rank)
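

# A quick, illustrative sanity check for the helpers above. The sample
# strings are hypothetical (not taken from the real dump); call this
# manually to verify the cleaning behaviour.
def _check_cleaners():
    assert _clean_text("AT&amp;T") == "AT&T"
    assert _clean_text("\\N") == ""
    assert _clean_html("Stocks rose.<br>Oil fell.") == "Stocks rose.\nOil fell."
    assert _clean_image("none") is None
    assert _clean_rank("3") == 3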


def run():
    """
    Run the conversion process.
    """
    rows = []
    categories = set()
    with open("newsSpace", encoding="ISO-8859-15") as f:
        doc = f.read()
    for row in doc.split("\t\\N\n"):
        if not row:
            continue
        # Drop escaped tabs inside fields so the record splits cleanly.
        row = row.replace("\\\t", "")
        try:
            source, url, title, image, category, description, rank, pubdate = row.split(
                "\t"
            )
        except ValueError:
            print(repr(row))
            sys.exit(1)
        categories.add(category)
        obj = {
            "source": source,
            "url": url,
            "title": _clean_text(title),
            "image": _clean_image(image),
            "category": category,
            "description": _clean_text(description),
            "rank": _clean_rank(rank),
            "pubdate": pubdate,
            "text": _clean_html(description),
        }
        rows.append(obj)

    # Add a numeric label to each row, derived from the sorted category list.
    _categories = sorted(categories)
    save_categories(_categories)
    for row in rows:
        row["label"] = _categories.index(row["category"])

    save_csv(rows)
    split_csv_train_test(test_size=0.2, random_state=42)
    # No test_size is given here, so train_test_split's default (0.25) applies.
    filter_and_split_csv(["World", "Sports", "Business", "Sci/Tech"], "top4")


def save_csv(rows, fname=OUTPUT_FILE):
    """
    Save the processed data into a CSV file.
    """
    # newline="" lets the csv module control line endings, as its docs advise.
    with open(fname, "w", encoding="utf8", newline="") as f:
        writer = csv.DictWriter(f, fieldnames=rows[0].keys())
        writer.writeheader()
        for row in rows:
            writer.writerow(row)
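

# Note: ag_news.csv (as written by run() above) carries the eight raw columns
# plus the derived "text" (HTML-stripped description) and "label" (index into
# categories.txt).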


def split_csv_train_test(**kwargs):
    """
    Split the data into training and testing sets.
    """
    df = pd.read_csv(OUTPUT_FILE)
    train_df, test_df = train_test_split(df, **kwargs)
    os.makedirs("data/all", exist_ok=True)
    train_df.to_csv(os.path.join("data", "all", TRAIN_OUTPUT_FILE), index=False)
    test_df.to_csv(os.path.join("data", "all", TEST_OUTPUT_FILE), index=False)


def filter_and_split_csv(categories, config_name, is_balanced=True, **kwargs):
    """
    Filter the data by categories and split it into training and testing
    sets. If is_balanced is True, every class is downsampled to the size
    of the smallest class.
    """
    df = pd.read_csv(OUTPUT_FILE)
    if is_balanced:
        dfs = [df[df["category"] == category] for category in categories]
        min_size = min(len(_df) for _df in dfs)
        dfs = [_df.sample(min_size) for _df in dfs]
        df = pd.concat(dfs)
    else:
        df = df[df["category"].isin(categories)]
    os.makedirs(f"data/{config_name}", exist_ok=True)
    train_df, test_df = train_test_split(df, **kwargs)
    train_df.to_csv(os.path.join("data", config_name, TRAIN_OUTPUT_FILE), index=False)
    test_df.to_csv(os.path.join("data", config_name, TEST_OUTPUT_FILE), index=False)


def save_categories(categories, fname="categories.txt"):
    """
    Save the categories into a text file, one per line.
    """
    with open(fname, "w") as f:
        for category in categories:
            # Use "\n" rather than os.linesep: text mode already translates
            # newlines, so os.linesep would yield "\r\r\n" on Windows.
            f.write(category + "\n")


if __name__ == "__main__":
    run()