This repository was archived by the owner on May 27, 2018. It is now read-only.
-
Notifications
You must be signed in to change notification settings - Fork 0
Expand file tree
/
Copy pathmarking.py
More file actions
83 lines (70 loc) · 2.55 KB
/
marking.py
File metadata and controls
83 lines (70 loc) · 2.55 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
import os
import re
from os import listdir

import numpy
import pymorphy2
import sklearn.metrics
from sklearn import cross_validation
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.linear_model import SGDClassifier
# TODO: cross-validation
# TODO: Pipeline
# TODO: clustering
# Directories holding the corpus: cleaned raw text, manual 0/1 marks,
# and lemmatized ("words") versions of each document, keyed by integer id.
CLEAN = '/home/max/PycharmProjects/FULL_DATA/CLEAN_DATA/'
MARK = '/home/max/PycharmProjects/FULL_DATA/MARKS_DATA/'
WORD = '/home/max/PycharmProjects/FULL_DATA/WORDS_DATA/'
# Sorted integer ids of all cleaned / all already-lemmatized documents
# (filenames are "<id>.txt"); computed from the filesystem at import time.
ALL_DATA = sorted([int(i.split('.')[0]) for i in listdir(CLEAN)])
WORD_DATA = sorted([int(i.split('.')[0]) for i in listdir(WORD)])
# Regex for tokens to keep: lowercase Cyrillic words only.
AllowedWords = '[а-я]+'
# Normalized text of each document processed by clean_by_words().
data = []
# Lemmatizer used to reduce every word to its normal form.
wordNormal = pymorphy2.MorphAnalyzer()
# Document ids by category: marked '1' (interesting) vs. the rest;
# `boring` is declared but never populated in this file.
interesting, boring = [], []
# Ids and texts of documents that have / don't have a mark file,
# filled in by check().
marked = []
unmarked = []
markedD = []
unmarkedD = []
vectorizer = TfidfVectorizer()
# NOTE(review): `metric` is assigned but unused below — presumably intended
# for scoring; confirm before removing.
metric = sklearn.metrics.roc_auc_score
# Logistic-loss SGD classifier; 'log' loss enables predict_proba in make_an().
cls = SGDClassifier(loss='log', alpha=0.00001, penalty='elasticnet', n_iter=10, n_jobs=-1)
def clean_by_words():
    """Lemmatize every cleaned document into a bag-of-words file.

    For each id in ALL_DATA that does not yet have a words file, reads the
    cleaned text, lower-cases it, keeps only Cyrillic words (AllowedWords),
    reduces each word to its pymorphy2 normal form, and writes the
    space-separated normal forms to WORD/<id>.txt.  The normalized text of
    each newly processed document is also appended to the module-level
    `data` list.  Side effects only; returns None.
    """
    for i in ALL_DATA:
        word_path = WORD + str(i) + '.txt'
        # Skip documents already normalized on a previous run (the original
        # probed this by opening and reading the file, leaking the handle).
        if os.path.exists(word_path):
            continue
        with open(CLEAN + str(i) + '.txt', 'r') as source:
            text = source.read()
        with open(word_path, 'w') as wordfile:
            data.append('')
            for word in re.findall(AllowedWords, text.lower()):
                normal = wordNormal.normal_forms(word)[0]
                wordfile.write(normal + ' ')
                data[-1] += normal + ' '
def check():
    """Partition documents by mark availability and build TF-IDF features.

    For every id in WORD_DATA, reads the lemmatized text.  If a mark file
    exists the document is labelled (`marked`/`markedD`; additionally
    `interesting` when the mark is '1'); a missing mark file puts it into
    `unmarked`/`unmarkedD`.  All texts are then vectorized together so the
    train and test matrices share one vocabulary.

    Returns:
        [X_train, Y_train, X_test] where X_train are the labelled rows,
        Y_train is a binary numpy array (1 = interesting), and X_test are
        the unlabelled rows, ordered like `unmarked`.
    """
    for i in WORD_DATA:
        with open(WORD + str(i) + '.txt', 'r') as wordfile:
            text = wordfile.read()
        # EAFP: a missing mark file means the document is not yet labelled.
        # Narrowed from a bare `except:` so real bugs are no longer hidden.
        try:
            with open(MARK + str(i) + '.txt', 'r') as mark:
                if mark.read() == '1':
                    interesting.append(i)
                marked.append(i)
                markedD.append(text)
        except FileNotFoundError:
            unmarked.append(i)
            unmarkedD.append(text)
    # Fit the vectorizer on labelled + unlabelled texts in one pass so both
    # matrices use the same feature space; labelled rows come first.
    data = markedD + unmarkedD
    X = vectorizer.fit_transform(data)
    Y_train = numpy.array([1 if t in interesting else 0 for t in marked])
    X_train = X[:len(markedD), :]
    X_test = X[len(markedD):, :]
    return [X_train, Y_train, X_test]
def make_an(X_train, Y_train, X_test):
    """Fit the global classifier and rank unlabelled documents.

    Args:
        X_train: TF-IDF matrix of labelled documents.
        Y_train: binary labels for X_train (1 = interesting).
        X_test: TF-IDF matrix of unlabelled documents, row-aligned with the
            module-level `unmarked` id list.

    Returns:
        List of [doc_id, P(interesting)] pairs sorted by probability,
        ascending (most interesting candidates last).
    """
    cls.fit(X_train, Y_train)
    # One vectorized predict_proba call instead of one model call per row.
    probabilities = cls.predict_proba(X_test)[:, 1]
    every = [[doc_id, p] for doc_id, p in zip(unmarked, probabilities)]
    return sorted(every, key=lambda pair: pair[1])
def get_score(X_train, Y_train):
    """Print 5-fold cross-validated accuracy of the global classifier.

    Args:
        X_train: TF-IDF feature matrix of the labelled documents.
        Y_train: binary label vector (1 = interesting, 0 = boring).
    """
    # sklearn.cross_validation was deprecated in 0.18 and removed in 0.20;
    # model_selection provides the drop-in replacement.
    from sklearn.model_selection import cross_val_score
    score = cross_val_score(cls, X_train, Y_train, cv=5)
    print("Accuracy: %0.2f (+/- %0.2f)" % (score.mean(), score.std() * 2))
# 0.95
# a = check()
# make_an(a[0],a[1],a[2])
# get_score(a[0], a[1])