-
Notifications
You must be signed in to change notification settings - Fork 0
Expand file tree
/
Copy pathrecommender.py
More file actions
179 lines (146 loc) Β· 6.16 KB
/
recommender.py
File metadata and controls
179 lines (146 loc) Β· 6.16 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
import pandas as pd
import numpy as np
import re
import string
from typing import List, Optional, Tuple
# Minimal English stop-word list removed from movie text before TF-IDF.
STOP_WORDS = set(
    "the a an and or of in on for to with at by is "
    "it this that from as are be was were but not so "
    "if their they them he she his her".split()
)
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.neighbors import NearestNeighbors
def clean_text(text: str) -> str:
    """Normalize raw movie text for TF-IDF vectorization.

    Lowercases *text*, replaces every punctuation character with a
    space, and drops tokens found in ``STOP_WORDS``.

    Args:
        text: Raw text; non-string input (e.g. NaN from pandas) is
            treated as empty.

    Returns:
        A single space-joined string of the surviving tokens.
    """
    if not isinstance(text, str):
        return ""
    # str.translate does one C-level pass, replacing each punctuation
    # character with a space — same effect as the old regex class
    # [<escaped punctuation>] but without regex machinery.
    table = str.maketrans(string.punctuation, " " * len(string.punctuation))
    text = text.lower().translate(table)
    return " ".join(t for t in text.split() if t not in STOP_WORDS)
class MovieRecommender:
    """Content-based movie recommender.

    Pipeline: load a CSV of movies, combine each movie's genre and
    overview into one cleaned text document, vectorize the documents
    with TF-IDF, and answer similarity queries with brute-force cosine
    nearest-neighbour search. Call :meth:`fit` once before using any
    recommendation method.
    """

    def __init__(self, csv_path: str):
        self.csv_path = csv_path
        # All of the following are populated by fit().
        self.movies: Optional[pd.DataFrame] = None
        self.vectorizer: Optional[TfidfVectorizer] = None
        self.feature_matrix = None  # sparse TF-IDF matrix, one row per movie
        self.nn_model: Optional[NearestNeighbors] = None

    def load_and_prepare(self):
        """Read the CSV and build the cleaned ``combined_text`` column."""
        df = pd.read_csv(self.csv_path)
        # Keep only useful columns if they exist in this dataset.
        keep_cols = [
            "id",
            "title",
            "genre",
            "original_language",
            "overview",
            "popularity",
            "release_date",
            "vote_average",
            "vote_count",
        ]
        df = df[[c for c in keep_cols if c in df.columns]]
        # Rows without a title cannot be looked up; drop them and
        # renumber so positional and label indices coincide (the
        # recommend methods rely on this).
        df = df.dropna(subset=[c for c in ["title"] if c in df.columns]).reset_index(drop=True)
        # Guarantee both text columns exist and contain no NaN.
        for col in ("genre", "overview"):
            df[col] = df[col].fillna("") if col in df.columns else ""
        # One "document" per movie: genre + overview, cleaned.
        df["combined_text"] = (
            df["genre"].astype(str) + " " + df["overview"].astype(str)
        ).apply(clean_text)
        self.movies = df

    def build_vectorizer(self):
        """Fit the TF-IDF vectorizer over all combined documents."""
        # Safe defaults — can be tuned. min_df=2 ignores terms that
        # appear in only one movie, which also filters typo noise.
        self.vectorizer = TfidfVectorizer(
            max_features=20000,
            ngram_range=(1, 2),
            min_df=2,
        )
        self.feature_matrix = self.vectorizer.fit_transform(self.movies["combined_text"])

    def build_nn_model(self):
        """Fit a brute-force cosine nearest-neighbour index."""
        # cosine distance = 1 - cosine similarity
        self.nn_model = NearestNeighbors(
            metric="cosine", algorithm="brute", n_neighbors=21
        )
        self.nn_model.fit(self.feature_matrix)

    def fit(self):
        """Run the full pipeline: load data, vectorize, build the index."""
        self.load_and_prepare()
        self.build_vectorizer()
        self.build_nn_model()

    def _get_index_from_title(self, title: str) -> Optional[int]:
        """Return the row index of *title* (case-insensitive), or None."""
        mask = self.movies["title"].astype(str).str.lower() == title.lower()
        matches = self.movies[mask]
        if matches.empty:
            return None
        # First match wins when several movies share a title.
        return matches.index[0]

    def recommend_similar_by_title(
        self, title: str, top_n: int = 10
    ) -> Tuple[Optional[pd.DataFrame], Optional[str]]:
        """Recommend up to *top_n* movies most similar to *title*.

        Returns:
            ``(results, None)`` on success, where ``results`` carries a
            ``similarity`` column in [0, 1]; ``(None, message)`` when
            the title is not in the dataset.
        """
        idx = self._get_index_from_title(title)
        if idx is None:
            return None, f"Movie '{title}' not found in the dataset."
        # Request top_n + 1 so the query movie itself (distance 0) can
        # be skipped — but never more neighbours than there are movies,
        # which would make kneighbors raise.
        n_neighbors = min(top_n + 1, self.feature_matrix.shape[0])
        distances, indices = self.nn_model.kneighbors(
            self.feature_matrix[idx], n_neighbors=n_neighbors
        )
        indices = indices[0][1:]
        distances = distances[0][1:]
        results = self.movies.iloc[indices].copy()
        results["similarity"] = 1 - distances
        return results, None

    def recommend_from_favourites(
        self, titles: List[str], top_n: int = 10
    ) -> Tuple[Optional[pd.DataFrame], Optional[str]]:
        """Recommend movies similar to the mean profile of *titles*.

        Unknown titles are silently ignored; if none resolve, an error
        message is returned instead of results.

        Returns:
            ``(results, None)`` on success (``similarity`` column
            included), or ``(None, message)`` when no title is found.
        """
        # Resolve the favourite titles to row indices, skipping unknowns.
        fav_indices = [
            i for i in (self._get_index_from_title(t) for t in titles) if i is not None
        ]
        if not fav_indices:
            return None, "None of the selected movies were found."
        # The user profile is the mean of the favourites' TF-IDF rows.
        user_profile = self.feature_matrix[fav_indices].mean(axis=0)
        # kneighbors needs a dense (1, n_features) array; sparse mean
        # may return a matrix, so densify defensively.
        if hasattr(user_profile, "toarray"):
            user_profile = user_profile.toarray()
        else:
            user_profile = np.asarray(user_profile)
        if user_profile.ndim == 1:
            user_profile = user_profile.reshape(1, -1)
        # Over-fetch so the favourites themselves can be filtered out.
        # BUG FIX: the old code capped this at the model's fitted
        # n_neighbors (21), which could leave fewer than top_n results
        # after filtering; kneighbors accepts any value up to the
        # number of samples, so that is the only hard limit.
        n_neighbors = min(top_n + len(fav_indices) + 5, self.feature_matrix.shape[0])
        distances, neighbour_idxs = self.nn_model.kneighbors(
            user_profile, n_neighbors=n_neighbors
        )
        neighbour_idxs = neighbour_idxs[0]
        distances = distances[0]
        fav_set = set(fav_indices)  # O(1) membership during filtering
        filtered = []
        filtered_distances = []
        for nid, dist in zip(neighbour_idxs, distances):
            if nid not in fav_set:
                filtered.append(nid)
                filtered_distances.append(dist)
                if len(filtered) >= top_n:
                    break
        results = self.movies.iloc[filtered].copy()
        results["similarity"] = 1 - np.array(filtered_distances)
        return results, None

    def get_genres(self):
        """Return a sorted list of every distinct genre in the dataset."""
        genres = set()
        for g in self.movies["genre"].dropna():
            # Genre cells are comma-separated, e.g. "Action, Drama".
            for part in str(g).split(","):
                part = part.strip()
                if part:
                    genres.add(part)
        return sorted(genres)

    def filter_by_genres(self, selected_genres: List[str]):
        """Return movies whose genre list contains ALL selected genres.

        Matching is case-insensitive; an empty selection returns the
        full DataFrame unchanged.
        """
        if not selected_genres:
            return self.movies

        def has_all(genres_str: str) -> bool:
            movie_genres = {g.strip().lower() for g in str(genres_str).split(",") if g.strip()}
            return all(g.lower() in movie_genres for g in selected_genres)

        return self.movies[self.movies["genre"].apply(has_all)]