# Source: amazon_reviews_2013/convert.py
# (commit 5ea2e73, "fix: split train and test set", 4.85 kB)
"""
Convert the Amazon reviews dataset to parquet format.
Usage:
$ make download
$ python convert.py
"""
import os
import gzip
from glob import glob
import pandas as pd
from sklearn.model_selection import train_test_split
# Directory that receives the generated parquet files.
OUTPUT_DIR = "amazon_reviews_2013"
# Maximum number of review entries written per parquet chunk.
CHUNK_SIZE = 2000000
# Fraction of each chunk held out as the test split.
TEST_SIZE = 0.2
# Maps each downloaded archive filename to its human-readable category label.
# The review counts in the trailing comments are informational only.
CATEGORIES = {
"Amazon_Instant_Video.txt.gz": "Amazon Instant Video", # 717,651 reviews
"Arts.txt.gz": "Arts", # 27,980 reviews
"Automotive.txt.gz": "Automotive", # 188,728 reviews
"Baby.txt.gz": "Baby", # 184,887 reviews
"Beauty.txt.gz": "Beauty", # 252,056 reviews
"Books.txt.gz": "Book", # 12,886,488 reviews
"Cell_Phones_&_Accessories.txt.gz": "Cell Phone", # 78,930 reviews
"Clothing_&_Accessories.txt.gz": "Clothing", # 581,933 reviews
"Electronics.txt.gz": "Electronics", # 1,241,778 reviews
"Gourmet_Foods.txt.gz": "Gourmet Food", # 154,635 reviews
"Health.txt.gz": "Health", # 428,781 reviews
"Home_&_Kitchen.txt.gz": "Home & Kitchen", # 991,794 reviews
"Industrial_&_Scientific.txt.gz": "Industrial & Scientific", # 137,042 reviews
"Jewelry.txt.gz": "Jewelry", # 58,621 reviews
"Kindle_Store.txt.gz": "Kindle Store", # 160,793 reviews
"Movies_&_TV.txt.gz": "Movie & TV", # 7,850,072 reviews
"Musical_Instruments.txt.gz": "Musical Instrument", # 85,405 reviews
"Music.txt.gz": "Music", # 6,396,350 reviews
"Office_Products.txt.gz": "Office", # 138,084 reviews
"Patio.txt.gz": "Patio", # 206,250 reviews
"Pet_Supplies.txt.gz": "Pet Supply", # 217,170 reviews
"Shoes.txt.gz": "Shoe", # 389,877 reviews
"Software.txt.gz": "Software", # 95,084 reviews
"Sports_&_Outdoors.txt.gz": "Sports & Outdoor", # 510,991 reviews
"Tools_&_Home_Improvement.txt.gz": "Tools & Home Improvement", # 409,499 reviews
"Toys_&_Games.txt.gz": "Toy & Game", # 435,996 reviews
"Video_Games.txt.gz": "Video Game", # 463,669 reviews
"Watches.txt.gz": "Watch", # 68,356 reviews
}
# Category labels in insertion order; clean() encodes a category as its
# integer index into this list.
CATEGORIES_LIST = list(CATEGORIES.values())
def to_parquet():
    """
    Stream every category file into fixed-size parquet chunks.

    Entries from all files listed in CATEGORIES are accumulated in order;
    each time CHUNK_SIZE entries are collected they are written out as one
    train/test parquet pair via save_parquet().

    Returns:
        The total number of chunks written (including the final,
        possibly-partial one).
    """
    chunk_idx = 0
    buffer = []
    for fname in CATEGORIES:
        for record in parse_file(fname):
            buffer.append(record)
            if len(buffer) == CHUNK_SIZE:
                save_parquet(buffer, chunk_idx)
                chunk_idx += 1
                buffer = []
    # Flush whatever is left after the last full chunk.
    if buffer:
        save_parquet(buffer, chunk_idx)
        chunk_idx += 1
    return chunk_idx
def save_parquet(data, chunk):
    """
    Split one chunk of entries into train/test sets and write both as parquet.

    Filenames contain the literal placeholder "nchunks"; rename_chunks()
    later substitutes the real total once every chunk has been written.

    Args:
        data: list of entry dicts for this chunk.
        chunk: zero-based chunk index, used in the filenames.
    """
    train_path = os.path.join(OUTPUT_DIR, f"train-{chunk:04d}-of-nchunks.parquet")
    test_path = os.path.join(OUTPUT_DIR, f"test-{chunk:04d}-of-nchunks.parquet")
    frame = pd.DataFrame(data)
    # Fixed seed keeps the split reproducible across runs.
    train_part, test_part = train_test_split(frame, test_size=TEST_SIZE, random_state=42)
    train_part.to_parquet(train_path)
    test_part.to_parquet(test_path)
def parse_file(filename):
    """
    Yield review entries parsed from one gzipped category file.

    The format is runs of "key: value" lines; a line without a colon
    (normally the blank separator between records) terminates the current
    entry, which is tagged with its category label and yielded.

    Fixes over the original version:
    - the gzip handle is closed deterministically via a context manager;
    - a trailing entry not followed by a separator line is still tagged
      with its category before being yielded;
    - empty entries (file ending on a separator, or consecutive blank
      lines) are no longer emitted.

    Args:
        filename: key into CATEGORIES; also the path of the .gz file.

    Yields:
        dict per review, with a "product/category" key added.
    """
    category = CATEGORIES[filename]
    entry = {}
    with gzip.open(filename, "r") as f:
        for line in f:
            line = line.decode().strip()
            colon_pos = line.find(":")
            if colon_pos == -1:
                # Separator line: finish the current entry, if any.
                if entry:
                    entry["product/category"] = category
                    yield entry
                    entry = {}
                continue
            e_name = line[:colon_pos]
            # Skip the colon and the single space that follows it.
            entry[e_name] = line[colon_pos + 2:]
    # Final record when the file does not end with a separator line.
    if entry:
        entry["product/category"] = category
        yield entry
def clean(entry):
    """
    Normalize field types in a review entry and drop identifying fields.

    Mutates *entry* in place and also returns it.

    NOTE(review): this function is not invoked anywhere in this file
    (to_parquet saves raw parsed entries) — confirm whether it should be
    applied before save_parquet().

    Args:
        entry: dict produced by parse_file(), with string values.

    Returns:
        The same dict with typed values, "review/helpfulness_ratio" and
        "review/helpfulness_total_votes" added, and user/product ids removed.

    Raises:
        ValueError: if a numeric field cannot be parsed.
        KeyError: if an expected field is missing.
    """
    price = entry["product/price"]
    entry["product/price"] = None if price == "unknown" else float(price)
    # Scores arrive as strings like "5.0"; int() cannot parse those
    # directly, so go through float first (still accepts plain "5").
    entry["review/score"] = int(float(entry["review/score"]))
    entry["review/time"] = int(entry["review/time"])
    # Encode the category label as its index in CATEGORIES_LIST.
    entry["product/category"] = CATEGORIES_LIST.index(entry["product/category"])
    numerator, denominator = entry["review/helpfulness"].split("/")
    numerator = int(numerator)
    denominator = int(denominator)
    # Avoid division by zero when a review has no helpfulness votes.
    entry["review/helpfulness_ratio"] = numerator / denominator if denominator else 0
    entry["review/helpfulness_total_votes"] = denominator
    # Remove personally identifying / non-feature fields.
    del entry["review/userId"]
    del entry["review/profileName"]
    del entry["product/productId"]
    return entry
def rename_chunks(n_chunks):
    """
    Replace the "nchunks" placeholder in every parquet filename with the
    actual chunk count, zero-padded to four digits.

    Args:
        n_chunks: total number of chunks produced by to_parquet().
    """
    pattern = os.path.join(OUTPUT_DIR, "*-of-nchunks.parquet")
    for old_path in glob(pattern):
        new_path = old_path.replace("-nchunks", f"-{n_chunks:04d}")
        os.rename(old_path, new_path)
def run():
    """
    Convert the whole dataset: write all parquet chunks, then rewrite the
    placeholder filenames with the final chunk count.
    """
    # exist_ok makes this idempotent across reruns.
    os.makedirs(OUTPUT_DIR, exist_ok=True)
    total = to_parquet()
    print(f"{total} chunks saved")
    rename_chunks(total)


if __name__ == "__main__":
    run()