import json

import datasets
import pandas as pd
from sklearn.model_selection import train_test_split

_CITATION = """\
@InProceedings{huggingface:dataset,
title = {MovieLens Ratings},
author = {Ismail Ashraq, James Briggs},
year = {2022}
}
"""

_DESCRIPTION = """\
User ratings from the MovieLens 25M dataset, streamed directly from the GroupLens servers.
"""
_HOMEPAGE = "https://grouplens.org/datasets/movielens/"

_LICENSE = ""

_URL = "https://files.grouplens.org/datasets/movielens/ml-25m.zip"


class MovieLens(datasets.GeneratorBasedBuilder):
    """The MovieLens 25M dataset for ratings"""

    def _info(self):
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(
                {
                    "imdb_id": datasets.Value("string"),
                    "movie_id": datasets.Value("int32"),
                    "user_id": datasets.Value("int32"),
                    "rating": datasets.Value("float32"),
                    "title": datasets.Value("string"),
                    "poster": datasets.Value("string"),
                }
            ),
            supervised_keys=None,
            homepage="https://grouplens.org/datasets/movielens/",
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
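        # download_and_extract returns the local directory of the extracted archive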
        new_url = dl_manager.download_and_extract(_URL)
        # PREPROCESS
        # load all files
        movie_ids = pd.read_csv(new_url+"/ml-25m/links.csv")
        movie_meta = pd.read_csv(new_url+"/ml-25m/movies.csv")
        movie_ratings = pd.read_csv(new_url+"/ml-25m/ratings.csv")
        # merge to create movies dataframe
        movies = movie_meta.merge(movie_ids, on="movieId")
        # keep only a subset of recent movies (higher IMDb IDs correspond to
        # more recently registered titles)
        recent_movies = movies[movies["imdbId"].astype(int) >= 2000000].fillna("None")
        # keep only ratings for movies present in recent_movies
        mask = movie_ratings["movieId"].isin(recent_movies["movieId"])
        filtered_movie_ratings = movie_ratings[mask]
        # merge with movies
        df = filtered_movie_ratings.merge(
            recent_movies, on="movieId"
        ).astype(
            {"movieId": int, "userId": int, "rating": float}
        )
        # keep only movies and users that appear more than twice in the dataset
        df = df.groupby("movieId").filter(lambda x: len(x) > 2)
        df = df.groupby("userId").filter(lambda x: len(x) > 2)
        # map unique movie IDs to sequential index values
        unique_movieids = sorted(df["movieId"].unique())
        movie_mapping = {movie_id: i for i, movie_id in enumerate(unique_movieids)}
        df["movie_id"] = df["movieId"].map(movie_mapping)
        # map unique user IDs to sequential index values
        unique_userids = sorted(df["userId"].unique())
        user_mapping = {user_id: i for i, user_id in enumerate(unique_userids)}
        df["user_id"] = df["userId"].map(user_mapping)
        # add "tt" prefix to align with IMDB URL IDs
        df["imdb_id"] = df["imdbId"].apply(lambda x: "tt" + str(x))
        # now add the movie posters
        posters = datasets.load_dataset("pinecone/movie-posters", split='train').to_pandas()
        df = df.merge(posters, left_on='imdb_id', right_on='imdbId')
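        # (the default inner join drops ratings for movies without a poster)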
        # we also don't need all columns
        df = df[
            ["imdb_id", "movie_id", "user_id", "rating", "title", "poster"]
        ]
        # create a stratified 90/10 train-test split; stratifying on "movie_id"
        # splits each movie's ratings proportionally between train and test, and
        # requires at least two ratings per movie (the filters above guarantee this)
        train, test = train_test_split(
            df, test_size=0.1, shuffle=True, stratify=df["movie_id"], random_state=0
        )
        # save
        train.to_json(new_url+"/train.jsonl", orient="records", lines=True)
        test.to_json(new_url+"/test.jsonl", orient="records", lines=True)

        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={"filepath": new_url+"/train.jsonl"}
            ),
            datasets.SplitGenerator(
                name=datasets.Split.TEST,
                gen_kwargs={"filepath": new_url+"/test.jsonl"}
            ),
        ]

    def _generate_examples(self, filepath):
        """Yield one (key, example) pair per line of the JSON-lines file."""
        with open(filepath, "r") as f:
            for id_, line in enumerate(f):
                yield id_, json.loads(line)
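
if __name__ == "__main__":
    # Minimal usage sketch, assuming this script is saved as "movielens.py"
    # (the filename is an assumption; point load_dataset at wherever the file
    # lives, and note that newer `datasets` releases may require
    # trust_remote_code=True for script-based datasets). The first call
    # downloads and preprocesses the full ML-25M archive, so it can take a while.
    from datasets import load_dataset

    train = load_dataset("movielens.py", split="train")
    test = load_dataset("movielens.py", split="test")
    print(len(train), len(test))
    print(train[0])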