-
Notifications
You must be signed in to change notification settings - Fork 0
Expand file tree
/
Copy path: Sentiment_User.py
More file actions
37 lines (29 loc) · 1.37 KB
/
Sentiment_User.py
File metadata and controls
37 lines (29 loc) · 1.37 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
import nltk
from nltk.corpus import movie_reviews, stopwords
from nltk.classify import NaiveBayesClassifier
from nltk.tokenize import word_tokenize
from nltk.stem import WordNetLemmatizer
from sklearn.model_selection import train_test_split
# One-time fetch of the NLTK data this script depends on (no-op if already cached).
nltk.download("movie_reviews")  # labeled pos/neg movie-review corpus used for training
nltk.download("punkt")  # tokenizer models required by word_tokenize
nltk.download("stopwords")  # English stop-word list
nltk.download("wordnet")  # lexical database backing WordNetLemmatizer
# Shared preprocessing state: a set gives O(1) stop-word membership tests,
# and a single lemmatizer instance is reused for every token.
stop_words = set(stopwords.words("english"))
lemmatizer = WordNetLemmatizer()
def preprocess(text):
    """Turn raw text into a bag-of-words feature dict for the classifier.

    Lowercases the input, tokenizes it, discards non-alphabetic tokens
    and English stop words, lemmatizes what remains, and returns a
    mapping of the form ``{word: True}`` as expected by
    ``NaiveBayesClassifier``.
    """
    features = {}
    for raw_token in word_tokenize(text.lower()):
        if raw_token.isalpha() and raw_token not in stop_words:
            features[lemmatizer.lemmatize(raw_token)] = True
    return features
# Assemble the labeled dataset from the movie_reviews corpus: every "pos"
# file becomes a ("positive") example and every "neg" file a ("negative")
# one, in the same pos-then-neg order the corpus yields them.
all_reviews = []
for category, sentiment in (("pos", "positive"), ("neg", "negative")):
    for fileid in movie_reviews.fileids(category):
        all_reviews.append((movie_reviews.raw(fileid), sentiment))
# Convert each raw review to a feature dict, then hold out 20% for
# validation (fixed seed keeps the split reproducible) and train.
processed_data = [(preprocess(text), label) for text, label in all_reviews]
train_data, val_data = train_test_split(processed_data, test_size=0.2, random_state=42)
classifier = NaiveBayesClassifier.train(train_data)
# Interactive loop: classify user-supplied texts with the trained model.
# Fix: the original `int(input(...))` raised an uncaught ValueError on
# non-numeric input; re-prompt until a whole number is entered instead.
while True:
    try:
        n = int(input("How many texts do you want to analyze? "))
        break
    except ValueError:
        print("Please enter a whole number.")
print()
for i in range(n):
    user_text = input(f"Enter text {i+1}: ")
    # Reuse the training-time preprocessing so features match the model.
    features = preprocess(user_text)
    prediction = classifier.classify(features)
    print(f"'{user_text}' → {prediction}")