|
""" |
|
This script converts the data from the raw data to CSV files. |
|
|
|
Usage: |
|
make newsSpace |
|
python convert.py |
|
""" |
|
|
|
import csv |
|
import html |
|
import os |
|
import sys |
|
|
|
import pandas as pd |
|
|
|
from bs4 import BeautifulSoup |
|
|
|
from sklearn.model_selection import train_test_split |
|
|
|
# Column order of the raw tab-separated ``newsSpace`` records.
# NOTE(review): HEADER is not referenced anywhere in this script — it appears
# to document the raw record layout; confirm before removing.
HEADER = [
    "source",
    "url",
    "title",
    "image",
    "category",
    "description",
    "rank",
    "pubdate",
]

# Output file names: the full converted dataset and its train/test splits.
OUTPUT_FILE = "ag_news.csv"
TRAIN_OUTPUT_FILE = "train.csv"
TEST_OUTPUT_FILE = "test.csv"
|
|
|
|
|
def _clean_text(text): |
|
text = text.replace("\\\n", "\n") |
|
text = html.unescape(text) |
|
|
|
if text == "\\N": |
|
return "" |
|
|
|
return text |
|
|
|
|
|
def _clean_html(text):
    """Strip HTML markup from *text* and return plain text with tidy newlines.

    Closing-paragraph and ``<br>`` tags are first mapped to newlines so the
    extracted plain text keeps its paragraph structure.
    """
    html_code = _clean_text(text)
    # Bug fix: str.replace returns a new string — the results were previously
    # discarded, so </p> and <br> never contributed any newlines.
    html_code = html_code.replace("</p>", "\n\n</p>")
    html_code = html_code.replace("<br>", "\n")

    soup = BeautifulSoup(html_code, "html.parser")
    text = soup.get_text(separator=" ")

    # Drop the spaces that the separator introduces adjacent to newlines.
    text = text.replace(" \n", "\n").replace("\n ", "\n")

    # Trim residual whitespace at both ends of every line.
    lines = [line.strip() for line in text.split("\n")]
    return "\n".join(lines)
|
|
|
|
|
def _clean_image(image): |
|
if image == "none": |
|
return None |
|
return image |
|
|
|
|
|
def _clean_rank(rank): |
|
return int(rank) |
|
|
|
|
|
def run():
    """Parse the raw ``newsSpace`` dump, label every row, and write the CSVs."""
    with open("newsSpace", encoding="ISO-8859-15") as f:
        doc = f.read()

    rows = []
    categories = set()

    # Records in the dump are terminated by a tab, a literal \N, and a newline.
    for record in doc.split("\t\\N\n"):
        if not record:
            continue

        # Escaped tabs inside fields are noise — drop them before splitting.
        record = record.replace("\\\t", "")

        try:
            (
                source,
                url,
                title,
                image,
                category,
                description,
                rank,
                pubdate,
            ) = record.split("\t")
        except ValueError:
            # Malformed record: dump it for inspection and fail fast.
            print(repr(record))
            sys.exit(1)

        categories.add(category)

        rows.append(
            {
                "source": source,
                "url": url,
                "title": _clean_text(title),
                "image": _clean_image(image),
                "category": category,
                "description": _clean_text(description),
                "rank": _clean_rank(rank),
                "pubdate": pubdate,
                "text": _clean_html(description),
            }
        )

    sorted_categories = sorted(categories)

    save_categories(sorted_categories)

    # Label = index of the row's category in the sorted category list.
    for row in rows:
        row["label"] = sorted_categories.index(row["category"])

    save_csv(rows)
    split_csv_train_test(test_size=0.2, random_state=42)
|
|
|
|
|
def save_csv(rows, fname=OUTPUT_FILE):
    """
    Save the processed data into a CSV file.

    ``rows`` must be a non-empty list of dicts sharing the same keys; the
    first row's keys define the column order.
    """
    # newline="" is required by the csv module: without it, platforms that
    # translate line endings (e.g. Windows) emit an extra blank row after
    # every record.
    with open(fname, "w", encoding="utf8", newline="") as f:
        writer = csv.DictWriter(f, fieldnames=rows[0].keys())
        writer.writeheader()
        writer.writerows(rows)
|
|
|
|
|
def split_csv_train_test(**kwargs):
    """
    Split the full CSV into train and test CSV files.

    All keyword arguments are forwarded verbatim to
    ``sklearn.model_selection.train_test_split`` (e.g. ``test_size``,
    ``random_state``).
    """
    full_df = pd.read_csv(OUTPUT_FILE)
    train_part, test_part = train_test_split(full_df, **kwargs)
    train_part.to_csv(TRAIN_OUTPUT_FILE, index=False)
    test_part.to_csv(TEST_OUTPUT_FILE, index=False)
|
|
|
|
|
def save_categories(categories, fname="categories.txt"):
    """
    Save the categories into a text file, one category per line.
    """
    # Bug fix: writing os.linesep to a text-mode file double-translates the
    # line ending on Windows ("\r\r\n") — text mode already converts "\n".
    # An explicit encoding also keeps output consistent with save_csv.
    with open(fname, "w", encoding="utf8") as f:
        for category in categories:
            f.write(category + "\n")
|
|
|
if __name__ == "__main__": |
|
run() |
|
|