|
import datasets
import pandas as pd
import re
import json


_CITATION = """\
@InProceedings{huggingface:dataset,
    title = {MovieLens Ratings},
    author={Ismail Ashraq, James Briggs},
    year={2022}
}
"""

_DESCRIPTION = """\
This dataset streams user ratings from the MovieLens 25M dataset, downloaded directly from the MovieLens servers.
"""

_HOMEPAGE = "https://grouplens.org/datasets/movielens/"

_LICENSE = ""

_URL = "https://files.grouplens.org/datasets/movielens/ml-25m.zip"
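
# The builder below downloads the raw MovieLens 25M files, keeps recent movies and the
# users/movies with at least three ratings each, joins in movie posters from an
# auxiliary dataset, and exposes the result as a single "train" split.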
|
|
class MovieLens(datasets.GeneratorBasedBuilder):
    """The MovieLens 25M dataset for ratings."""

    def _info(self):
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(
                {
                    "imdb_id": datasets.Value("string"),
                    "movie_id": datasets.Value("int32"),
                    "user_id": datasets.Value("int32"),
                    "rating": datasets.Value("float32"),
                    "title": datasets.Value("string"),
                    "poster": datasets.Value("string"),
                }
            ),
            supervised_keys=None,
            homepage=_HOMEPAGE,
            license=_LICENSE,
            citation=_CITATION,
        )
|
    def _split_generators(self, dl_manager):
        # Download and extract the MovieLens 25M archive
        new_url = dl_manager.download_and_extract(_URL)

        # Load the links (movieId-to-imdbId), movie metadata, and ratings tables
        movie_ids = pd.read_csv(new_url + "/ml-25m/links.csv")
        movie_meta = pd.read_csv(new_url + "/ml-25m/movies.csv")
        movie_ratings = pd.read_csv(new_url + "/ml-25m/ratings.csv")
|
        # Attach IMDb IDs to the movie metadata
        movies = movie_meta.merge(movie_ids, on="movieId")

        # Keep only recent movies (IMDb IDs of 2000000 and above)
        recent_movies = movies[movies["imdbId"].astype(int) >= 2000000].fillna("None")

        # Restrict the ratings to those recent movies
        mask = movie_ratings["movieId"].isin(recent_movies["movieId"])
        filtered_movie_ratings = movie_ratings[mask]
|
        # Join the ratings with the movie info and normalise the dtypes
        df = filtered_movie_ratings.merge(
            recent_movies, on="movieId"
        ).astype(
            {"movieId": int, "userId": int, "rating": float}
        )

        # Drop movies and users with fewer than three ratings
        df = df.groupby("movieId").filter(lambda x: len(x) > 2)
        df = df.groupby("userId").filter(lambda x: len(x) > 2)
|
        # Map movie IDs to a contiguous 0..n-1 range
        unique_movieids = sorted(df["movieId"].unique())
        mapping = {unique_movieids[i]: i for i in range(len(unique_movieids))}
        df["movie_id"] = df["movieId"].map(lambda x: mapping[x])

        # Map user IDs to a contiguous 0..n-1 range
        unique_userids = sorted(df["userId"].unique())
        mapping = {unique_userids[i]: i for i in range(len(unique_userids))}
        df["user_id"] = df["userId"].map(lambda x: mapping[x])
|
df["imdb_id"] = df["imdbId"].apply(lambda x: "tt" + str(x)) |
|
|
|
posters = datasets.load_dataset("pinecone/movie-posters", split='train').to_pandas() |
|
df = df.merge(posters, left_on='imdb_id', right_on='imdbId') |
|
|
|
        # Keep only the exposed columns and cache them as JSON Lines
        df = df[
            ["imdb_id", "movie_id", "user_id", "rating", "title", "poster"]
        ]
        df.to_json(new_url + "/ratings.jsonl", orient="records", lines=True)
|
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={"filepath": new_url + "/ratings.jsonl"},
            ),
        ]
|
    def _generate_examples(self, filepath):
        """Yield the cached ratings as (key, example) pairs."""
        with open(filepath, "r") as f:
            for id_, line in enumerate(f):
                yield id_, json.loads(line)
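

# A minimal local smoke test, not part of the original script: it assumes this file is
# saved as "movielens.py", and recent `datasets` releases may additionally require
# trust_remote_code=True when loading a script by path.
if __name__ == "__main__":
    ds = datasets.load_dataset("movielens.py", split="train")
    print(ds[0])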