Upload 6 files
- app.py +53 -0
- links.csv +0 -0
- movies.csv +0 -0
- ratings.csv +0 -0
- requirements.txt +3 -0
- tags.csv +0 -0
app.py
ADDED
@@ -0,0 +1,53 @@
import streamlit as st
import pandas as pd
import zipfile
import requests
import io
from sklearn.metrics.pairwise import cosine_similarity

# Function to download and load data
@st.cache_data  # cache the download so the zip is fetched once, not on every rerun
def load_data():
    url = "https://files.grouplens.org/datasets/movielens/ml-latest-small.zip"
    response = requests.get(url)
    zip_file = zipfile.ZipFile(io.BytesIO(response.content))

    ratings = pd.read_csv(zip_file.open('ml-latest-small/ratings.csv'))
    movies = pd.read_csv(zip_file.open('ml-latest-small/movies.csv'))

    data = pd.merge(ratings, movies, on='movieId')
    return data

# Function to build user-item matrix and similarity matrix
@st.cache_data
def build_matrices(data):
    # Users as rows, movie titles as columns, unrated entries filled with 0
    user_item_matrix = data.pivot_table(index='userId', columns='title', values='rating').fillna(0)
    # User-user cosine similarity over the rating vectors
    user_similarity = cosine_similarity(user_item_matrix)
    user_similarity_df = pd.DataFrame(user_similarity, index=user_item_matrix.index, columns=user_item_matrix.index)
    return user_item_matrix, user_similarity_df

# Function to get recommendations
def get_recommendations(user_id, user_item_matrix, user_similarity_df, num_recommendations=5):
    # Similarity of every user to the target user (the target itself is included with similarity 1.0)
    user_sim_scores = user_similarity_df[user_id]
    similar_users = user_sim_scores.sort_values(ascending=False)
    similar_users_ratings = user_item_matrix.loc[similar_users.index]
    # Similarity-weighted average rating per title; titles the user has already rated are not filtered out
    weighted_ratings = similar_users_ratings.T.dot(similar_users)
    weighted_ratings = weighted_ratings / similar_users.sum()
    recommendations = weighted_ratings.sort_values(ascending=False).head(num_recommendations)
    return recommendations

# Load data and build matrices
data = load_data()
user_item_matrix, user_similarity_df = build_matrices(data)

# Streamlit app
st.title("Collaborative Filtering Recommendation System")

# Cast the index max to a plain int so all number_input bounds share one numeric type
user_id = st.number_input("Enter User ID", min_value=1, max_value=int(user_similarity_df.index.max()), step=1)

if st.button("Get Recommendations"):
    if user_id in user_similarity_df.index:
        recommendations = get_recommendations(user_id, user_item_matrix, user_similarity_df)
        st.write("Top Recommendations:")
        for movie, score in recommendations.items():
            st.write(f"{movie}: {score:.2f}")
    else:
        st.write("User ID not found.")
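The score that get_recommendations assigns to a title is a similarity-weighted average of every user's rating for it: the sum over users of sim(target, u) * rating(u, title), divided by the sum of the similarities. Below is a minimal sketch of the same computation outside Streamlit; it assumes the ratings.csv and movies.csv uploaded in this commit match the GroupLens ml-latest-small files, and user 1 is only an illustrative target.

import pandas as pd
from sklearn.metrics.pairwise import cosine_similarity

# Assumes the CSVs uploaded alongside app.py are the ml-latest-small files.
ratings = pd.read_csv("ratings.csv")
movies = pd.read_csv("movies.csv")
data = ratings.merge(movies, on="movieId")

# Same construction as build_matrices(): users x titles, cosine similarity between users.
user_item = data.pivot_table(index="userId", columns="title", values="rating").fillna(0)
sim = pd.DataFrame(cosine_similarity(user_item), index=user_item.index, columns=user_item.index)

# Similarity-weighted average rating per title from the perspective of user 1 (illustrative).
weights = sim[1]
scores = user_item.T.dot(weights) / weights.sum()
print(scores.sort_values(ascending=False).head(5))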
links.csv
ADDED
The diff for this file is too large to render.
movies.csv
ADDED
The diff for this file is too large to render.
ratings.csv
ADDED
The diff for this file is too large to render.
requirements.txt
ADDED
@@ -0,0 +1,3 @@
streamlit
pandas
scikit-learn
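With these dependencies installed (for example via pip install -r requirements.txt), the app is started in the usual way with streamlit run app.py. Note that app.py also imports requests directly; it normally arrives as a transitive dependency of streamlit, but listing it here explicitly would be the safer choice.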
tags.csv
ADDED
The diff for this file is too large to render.