-
Notifications
You must be signed in to change notification settings - Fork 0
Expand file tree
/
Copy pathfeatures.py
More file actions
298 lines (252 loc) · 8.89 KB
/
features.py
File metadata and controls
298 lines (252 loc) · 8.89 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
# ------------ Imports ------------
# Serializaton
import joblib
# Data processing
import pandas as pd
import re
# Data structures
from collections import Counter
from scipy.sparse import csr_matrix, hstack
# Own functions
from preprocessing import get_processed_dfs
# ------------ Feature Extraction Functions ------------
def extract_pronouns(df: pd.DataFrame, pronouns_list: list[str]) -> pd.DataFrame:
    """
    Count occurrences of each pronoun in every row's cleaned description.

    in: df with "id" and "description_clean" columns, list of pronouns
    out: df (same index as df) with an "id" column and one
         "pronoun_<p>" count column per entry of pronouns_list
    """
    print("Extracting pronouns...")
    column_names = [f"pronoun_{pronoun}" for pronoun in pronouns_list]
    records = []
    for _, row in df.iterrows():
        # tokenize once per row (split works as tokenizer) instead of once
        # per pronoun; Counter gives O(1) lookups and returns 0 for misses
        token_counts = Counter(str(row["description_clean"]).lower().split())
        record = {"id": row["id"]}  # keep the row id next to its counts
        for pronoun, column_name in zip(pronouns_list, column_names):
            record[column_name] = token_counts[pronoun]
        records.append(record)
    # build the frame in one shot — per-cell .loc writes are very slow
    return pd.DataFrame(records, columns=["id"] + column_names, index=df.index)
def extract_generics(df: pd.DataFrame, generics_list: list[str]) -> pd.DataFrame:
    """
    Count occurrences of each generic term in every row's cleaned description.

    in: df with "id" and "description_clean" columns, list of generics
    out: df (same index as df) with an "id" column and one
         "generic_<g>" count column per entry of generics_list
    """
    print("Extracting generics...")
    column_names = [f"generic_{generic}" for generic in generics_list]
    records = []
    for _, row in df.iterrows():
        # tokenize once per row (split works as tokenizer) instead of once
        # per generic; Counter gives O(1) lookups and returns 0 for misses
        token_counts = Counter(str(row["description_clean"]).lower().split())
        record = {"id": row["id"]}  # keep the row id next to its counts
        for generic, column_name in zip(generics_list, column_names):
            record[column_name] = token_counts[generic]
        records.append(record)
    # build the frame in one shot — per-cell .loc writes are very slow
    return pd.DataFrame(records, columns=["id"] + column_names, index=df.index)
def extract_mentions(df: pd.DataFrame) -> pd.DataFrame:
    """
    Count @-mention placeholders (@ind, @pre, @pol, @grp) per row.

    in: df with "id" and "description_clean" columns
    out: df (same index as df) with an "id" column and one
         "mention_<m>" count column per mention kind
    """
    print("Extracting mentions...")
    mentions_list = ["ind", "pre", "pol", "grp"]  # list of mentions
    # compile each pattern once instead of rebuilding it for every row
    patterns = {mention: re.compile(rf"@{mention}\b") for mention in mentions_list}
    column_names = [f"mention_{mention}" for mention in mentions_list]
    records = []
    for _, row in df.iterrows():
        text = str(row["description_clean"])
        record = {"id": row["id"]}  # keep the row id next to its counts
        for mention, column_name in zip(mentions_list, column_names):
            record[column_name] = len(patterns[mention].findall(text))
        records.append(record)
    # build the frame in one shot — per-cell .loc writes are very slow
    return pd.DataFrame(records, columns=["id"] + column_names, index=df.index)
def extract_word_n_grams(df: pd.DataFrame, words_of_interest: list[str]) -> pd.DataFrame:
    """
    Extract counts of the corpus-wide top-k word bi-/trigrams that contain
    at least one word of interest.

    in: df with "description_clean" (and ideally "id") columns,
        list of words of interest
    out: df (same index as df) with an "id" column plus one count column per
         kept n-gram ("ngram_bi_*" / "ngram_tri_*")
    note: reducing n-grams on top-k was generated by GitHub Copilot
    """
    print("Extracting word n-grams...")
    # keep only the k most common n-grams to bound the feature count
    TOP_BIGRAMS = 500
    TOP_TRIGRAMS = 100

    def ngrams(tokens: list[str], n: int) -> list[str]:
        # every run of n consecutive tokens, joined with a space;
        # empty result when fewer than n tokens exist
        return [" ".join(tokens[i : i + n]) for i in range(len(tokens) - n + 1)]

    interest_set: set[str] = {w.lower() for w in words_of_interest}  # O(1) membership
    bigram_counter: Counter = Counter()
    trigram_counter: Counter = Counter()
    rows_bigrams: list[list[str]] = []  # per-row kept bigrams, positional order
    rows_trigrams: list[list[str]] = []
    for _, row in df.iterrows():
        tokens = str(row.get("description_clean")).strip().split()  # tokenize by words
        # keep only n-grams containing at least one word of interest
        bigrams = [bg for bg in ngrams(tokens, 2)
                   if any(word in interest_set for word in bg.split())]
        trigrams = [tg for tg in ngrams(tokens, 3)
                    if any(word in interest_set for word in tg.split())]
        rows_bigrams.append(bigrams)
        rows_trigrams.append(trigrams)
        bigram_counter.update(bigrams)
        trigram_counter.update(trigrams)
    # pick top-k most common n-grams across the whole corpus
    top_bigrams = [ng for ng, _ in bigram_counter.most_common(TOP_BIGRAMS)]
    top_trigrams = [ng for ng, _ in trigram_counter.most_common(TOP_TRIGRAMS)]
    # column names can't contain spaces, so use _
    bigram_cols = [f"ngram_bi_{bg.replace(' ', '_')}" for bg in top_bigrams]
    trigram_cols = [f"ngram_tri_{tg.replace(' ', '_')}" for tg in top_trigrams]
    # prepare dataframe with zeros
    df_ngrams = pd.DataFrame(0, index=df.index, columns=["id"] + bigram_cols + trigram_cols)
    # BUG FIX: the row lists must be addressed by POSITION, not by the index
    # label — the original rows_bigrams[idx] broke on any non-RangeIndex df
    for pos, (idx, row) in enumerate(df.iterrows()):
        df_ngrams.at[idx, "id"] = row.get("id", idx)
        b_counter = Counter(rows_bigrams[pos])
        t_counter = Counter(rows_trigrams[pos])
        for bigram, col in zip(top_bigrams, bigram_cols):
            df_ngrams.at[idx, col] = b_counter.get(bigram, 0)
        for trigram, col in zip(top_trigrams, trigram_cols):
            df_ngrams.at[idx, col] = t_counter.get(trigram, 0)
    return df_ngrams
# ------------ Feature Extraction Pipeline ------------
def feature_extraction_pipeline(df):
    """
    Run all feature extractors and join their outputs on "id".

    in: df with "id" and "description_clean" columns
    out: df with pronoun, generic, mention and word-n-gram count features
    """
    # list of all pronouns (the original list contained "ihr" twice, which
    # only caused redundant counting work — deduplicated, output unchanged)
    pronouns_list = [
        "du",
        "er",
        "sie",
        "es",
        "wir",
        "ihr",
        "dich",
        "ihn",
        "uns",
        "euch",
        "dir",
        "ihm",
        "ihnen",
        "dein",
        "deine",
        "deiner",
        "deines",
        "deinem",
        "deinen",
        "sein",
        "seine",
        "seiner",
        "seines",
        "seinem",
        "seinen",
        "ihre",
        "ihrer",
        "ihres",
        "ihrem",
        "ihren",
        "unser",
        "unsere",
        "unserer",
        "unseres",
        "unserem",
        "unseren",
        "euer",
        "eure",
        "eurer",
        "eures",
        "eurem",
        "euren",
    ]
    # list of generic addressee terms
    generics_list = [
        "jeder",
        "alle",
        "leute",
        "man",
        "lieber",
        "liebe",
        "freunde",
        "gruppe",
        "jemand",
        "politik",
        "menschen",
        "gesellschaft",
        "gemeinschaft",
        "volk",
        "buerger",
        "welt",
        "nation",
        "pegida",
        "bevoelkerung",
        "der",
        "die",
        "das",
    ]
    words_of_interest = pronouns_list + generics_list  # n-gram filter vocabulary
    # get all feature dfs by calling the extractor functions
    df_pronouns = extract_pronouns(df, pronouns_list)
    df_generics = extract_generics(df, generics_list)
    df_mentions = extract_mentions(df)
    df_word_ngrams = extract_word_n_grams(df, words_of_interest)
    # merge all feature dataframes using 'id' as the join key
    df_features = (
        df_pronouns.merge(df_generics, on="id")
        .merge(df_mentions, on="id")
        .merge(df_word_ngrams, on="id")
    )
    return df_features
# ------------ Get Feature Matrices (to import in main) ------------
def get_model_matrices(csv_path: str, vec_path: str):
    """
    Build the training matrices: preprocess the csv, extract hand-crafted
    features, and stack them next to the tf-idf matrix.

    in: csv file path, vectorizer save path
    out: X_train (sparse feature matrix), y_train (label series)
    """
    print("-" * 50 + "Preprocessing Data" + "-" * 50 + "\n")
    df_train, tfidf_train = get_processed_dfs(csv_path, vec_path)  # dfs from preprocessing.py
    y_train = df_train["TAR"]  # label column
    print("-" * 50 + "Starting Feature Extraction" + "-" * 50 + "\n")
    df_features_train = feature_extraction_pipeline(df_train)  # hand-crafted count features
    # drop the join key, convert counts to sparse and stack beside the tf-idf matrix
    sparse_features_train = csr_matrix(df_features_train.drop(columns=["id"]).values)
    X_train: csr_matrix = hstack([sparse_features_train, tfidf_train])
    return X_train, y_train
# ------------ Test ------------
if __name__ == "__main__":
    # smoke test: build matrices from the training csv and inspect them
    # (paths are relative to this file's directory — run from there)
    X_train, y_train = get_model_matrices("../tar.csv", "./model")
    print(X_train)
    print(y_train)