-
Notifications
You must be signed in to change notification settings - Fork 0
Expand file tree
/
Copy path: mech_scrape.py
More file actions
291 lines (245 loc) · 12.7 KB
/
mech_scrape.py
File metadata and controls
291 lines (245 loc) · 12.7 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
import os
import pandas as pd
import re
import requests
from requests import get
class mechScraper(object):
    """
    Scraper for MechWarrior Online wiki mech pages.

    Fetches the raw wikitext of each weight-class page, extracts mech
    names, tonnage, and variant lists (base, hero, champion, special)
    with regexes, and writes the assembled tables to pipe-delimited
    text files.
    """
    def __init__(self):
        """
        Set initial class variables:
        - mech data urls (the &action=edit pages expose raw wikitext)
        - default output directory for saved files
        """
        self.light_url = "https://wiki.mwomercs.com/index.php?title=Light_Mechs&action=edit"
        self.medium_url = "https://wiki.mwomercs.com/index.php?title=Medium_Mechs&action=edit"
        self.heavy_url = "https://wiki.mwomercs.com/index.php?title=Heavy_Mechs&action=edit"
        self.assault_url = "https://wiki.mwomercs.com/index.php?title=Assault_Mechs&action=edit"
        self.output_path = "../output/"
    def get_mech_df(self, url=None):
        """
        Scrapes page data from a passed URL to extract:
        - mech names
        - mech tonnage
        - mech chassis variants (base, hero, champion, special)

        Parameters
        ----------
        url : str
            Wiki edit-page URL to scrape.

        Returns
        -------
        pandas.DataFrame with columns: mechs, tonnage, hero_names,
        hero_chassis, variants, special_variants, champion_variants.
        Returns None when no URL is supplied.
        """
        # check if URL was supplied
        if not url:
            print("must pass URL")
            return
        # scrape passed URL
        print("scraping " + url)
        page = requests.get(url)
        page_string = page.text
        # set webscrape regex patterns; the loose character classes absorb
        # the wiki markup quirks (stray quotes, parens, newlines)
        mech_obj = re.compile(r'===\s[\w\s-]+[\s()A-Z0-9-]*\s===')
        tonnage_obj = re.compile(r'Tonnage[\']*:[\s\d+]+')
        # "Var\w\wnts" deliberately matches both "Variants" and typos like "Varients"
        chassis_obj = re.compile(r'Var\w\wnts[\']+:[\sa-zA-Z0-9-,]+')
        hero_obj = re.compile(r'[\']+Hero[\']+:[,\s[()\.\'\w-]+')
        champ_obj = re.compile(r'[\']+Champion[\']+:\s?[+\s[()\w-]+')
        special_obj = re.compile(r'[\']+Special[\']+:\s?[\/,\s[()\w-]*')
        # get matching name, tonnage, and variant lists
        mech_results = mech_obj.finditer(page_string)
        tonnage_results = tonnage_obj.finditer(page_string)
        chassis_results = chassis_obj.finditer(page_string)
        hero_results = hero_obj.finditer(page_string)
        champion_results = champ_obj.finditer(page_string)
        special_results = special_obj.finditer(page_string)
        # clean regex results to get desired text for each mech: name, weight, chassis variants
        mech_names = [mech_name.group().replace("===", "").strip() for mech_name in mech_results]
        # tonnage is the last (up to) 3 digits of the match, e.g. "Tonnage: 100"
        mech_weights = [mech_weight.group().replace("\n", "")[-3:].strip() for mech_weight in tonnage_results]
        # get base chassis variants; chassis_variants is a list of lists
        # ([12:] skips the "Variants':" label prefix)
        chassis_variants = [chassis.group().replace("\n", "")[12:].replace(",", "").split() for chassis in chassis_results]
        # clean scrape data for hero variants ([11:] skips the "'Hero':" label)
        hero_variants = [hero.group().replace("\n", "")[11:].strip() for hero in hero_results]
        # hero name is the text before the chassis code in parens
        # NOTE(review): if no "(" is present, find() returns -1 and the last
        # character is dropped -- appears harmless for current wiki data
        hero_names = [hero[:hero.find("(")].strip() for hero in hero_variants]
        # correct for missing single quote in web data
        hero_names = [hero.replace("'''Special''", "") for hero in hero_names]
        for i in range(len(hero_variants)):
            # fix Archer Tempest hero typo in the source page
            if "ACR-T" in hero_variants[i]:
                hero_variants[i] = hero_variants[i].replace("ACR-T", "ARC-T")
                print("Archer Tempest fixed \n\n")
            if "(" in hero_variants[i]:
                # take from open parenthesis to the right (the chassis code)
                hero_variants[i] = hero_variants[i][hero_variants[i].index("("):].replace("'''Special'''", "")
            if "," in hero_variants[i]:
                # multiple hero chassis: split and strip the parens per entry
                hero_variants[i] = hero_variants[i].split(",")
                for j in range(len(hero_variants[i])):
                    if "(" in hero_variants[i][j]:
                        hero_variants[i][j] = hero_variants[i][j][hero_variants[i][j].find("(") + 1:]
                        hero_variants[i][j] = hero_variants[i][j].replace(")", "")
            else:
                # single hero chassis: normalize to a one-element list
                hero_variants[i] = [hero_variants[i].replace("'''Special'''", "").replace("(", "").replace(")", "")]
        # process scrape data for champion variants
        # convert to list from regex object
        champion_variants = [champ.group() for champ in champion_results]
        # split "Champion:" label out of chassis designation
        champion_variants = [champ[champ.index(":") + 1:].strip().replace(" ", "") for champ in champion_variants]
        # remove blank entries (a bare "n" is the residue of a stripped "\n")
        champion_variants = [champ for champ in champion_variants if champ != "n"]
        # process scrape data for special variants to remove clutter
        # convert to list from regex
        special_variants = [spec.group() for spec in special_results]
        # remove "Special:" label from chassis designation
        special_variants = [spec[spec.index(":") + 1:].strip().replace(" ", "") for spec in special_variants]
        special_list = []  # flat list; there are fewer specials than chassis
        for i in range(len(special_variants)):
            if "," in special_variants[i]:
                special_variants[i] = special_variants[i].split(",")
            else:
                special_variants[i] = [special_variants[i]]
            # convert special variants to single flat list
            for j in range(len(special_variants[i])):
                special_list.append(special_variants[i][j])
        # fix known errors in the scraped page data
        # (appending inside the loop is safe: range() was evaluated once)
        for i in range(len(special_list)):
            if special_list[i] == "ACR-2R(S)":
                print("Archer special fixed")
                special_list[i] = "ARC-2R(S)"
            if special_list[i] == "SMNM-F(L)SMN-M(L)":
                # two variants fused together on the page; split them apart
                special_list[i] = "SMNM-F(L)"
                special_list.append("SMN-M(L)")
                print("Fixing SMNM-F(L) and SMNM-F(L)")
        for i in range(len(hero_names)):
            # fix hero-name typos in the source page
            if hero_names[i] == "Wrat":
                hero_names[i] = "Wrath"
                print(hero_names[i])
            if hero_names[i] == "Hi Ther":
                hero_names[i] = "Hi There"
        for i in range(len(hero_variants)):
            # fix hero chassis-code typos in the source page
            if hero_variants[i][0] == "HMN-PK":
                hero_variants[i][0] = "HMN-PA"
                print("Fixing HMN-PK: ", hero_variants[i])
            if hero_variants[i][0] == "EBJ-ESP":
                hero_variants[i][0] = "EBJ-EC"
            if hero_variants[i][0] == "MKII-DS":
                hero_variants[i][0] = "MCII-DS"
        # convert lists to dict as preprocess for conversion to dataframe
        mech_dict = {
            "mechs": mech_names,
            "tonnage": mech_weights,
            "variants": chassis_variants,
            "hero_chassis": hero_variants,
            "hero_names": hero_names
        }
        mech_df = pd.DataFrame(mech_dict)
        # match special variants to base chassis to get weight data
        # use 3-letter chassis designation as match key
        mech_df["special_variants"] = ""
        for index, row in mech_df.iterrows():
            add_specials = []
            # clan IIC check disambiguates from inner sphere variants;
            # it depends only on the row, so hoist it out of the inner loop
            clan = "IIC" in row["variants"][0]
            mech_letters = row["variants"][0][:3].upper()
            for spec in special_list:
                # match on prefix AND agree on clan/inner-sphere status
                if mech_letters == spec[:3].upper() and ("IIC" in spec) == clan:
                    add_specials.append(spec)
            mech_df.at[index, "special_variants"] = add_specials
        # match champion variants to base chassis to get weight data
        # use 3-letter chassis designation as match key
        mech_df["champion_variants"] = ""
        for index, row in mech_df.iterrows():
            add_champions = []
            clan = "IIC" in row["variants"][0]
            mech_letters = row["variants"][0][:3].upper()
            for champ in champion_variants:
                # BUG FIX: the clan branch previously appended special_list
                # entries into add_specials (copy-paste from the special loop
                # above); it must collect the champion variant itself
                if mech_letters == champ[:3].upper() and ("IIC" in champ) == clan:
                    add_champions.append(champ)
            mech_df.at[index, "champion_variants"] = add_champions
        mech_df = mech_df[["mechs", "tonnage", "hero_names", "hero_chassis", "variants",
                           "special_variants", "champion_variants"]]
        return mech_df
    def save_data(self, data, weight_class, output_path=None):
        """
        Writes a pandas df to disk as a pipe-delimited text file.

        Parameters
        ----------
        data : pandas.DataFrame
            Table to write.
        weight_class : str
            Used as the file name (e.g. "assault" -> assault.txt).
        output_path : str, optional
            Output directory; defaults to self.output_path. Created if
            it does not exist.
        """
        if not output_path:
            output_path = self.output_path
        if not os.path.exists(output_path):
            os.makedirs(output_path)
        print("saving data for " + weight_class)
        data.to_csv(output_path + weight_class + ".txt", sep="|", index=False)
    def main(self):
        """
        Scrapes all four weight-class URLs, saves each weight class and a
        combined table, then builds and saves a flattened per-variant
        weight table (one row per chassis variant).
        """
        assault_mech_df = self.get_mech_df(url=self.assault_url)
        heavy_mech_df = self.get_mech_df(url=self.heavy_url)
        medium_mech_df = self.get_mech_df(url=self.medium_url)
        light_mech_df = self.get_mech_df(url=self.light_url)
        all_weights_df = pd.concat([assault_mech_df, heavy_mech_df, medium_mech_df,
                                    light_mech_df])
        self.save_data(assault_mech_df, "assault")
        self.save_data(heavy_mech_df, "heavy")
        self.save_data(medium_mech_df, "medium")
        self.save_data(light_mech_df, "light")
        self.save_data(all_weights_df, "all_weights")
        # flatten to one row per (mech, variant) across base, hero, special,
        # and champion variants; rows are collected in a list and the frame
        # built once -- repeated pd.concat in a loop is quadratic
        # (the unused var_0..var_n "melt" columns from the old version were
        # dead code and have been removed)
        variant_rows = []
        for index, row in all_weights_df.iterrows():
            for variant in row["variants"]:
                variant_rows.append({
                    "mech_name": row["mechs"],
                    "tonnage": row["tonnage"],
                    "variant": variant.upper()
                })
            for chassis in row["hero_chassis"]:
                # hero rows carry the hero's own name, not the base mech name
                variant_rows.append({
                    "mech_name": row["hero_names"],
                    "tonnage": row["tonnage"],
                    "variant": chassis.upper()
                })
            for spec in row["special_variants"]:
                variant_rows.append({
                    "mech_name": row["mechs"],
                    "tonnage": row["tonnage"],
                    "variant": spec.upper()
                })
            for champ in row["champion_variants"]:
                variant_rows.append({
                    "mech_name": row["mechs"],
                    "tonnage": row["tonnage"],
                    "variant": champ.upper()
                })
        variant_weights_df = pd.DataFrame(variant_rows)
        # remove duplicate rows, keeping the first occurrence
        variant_weights_df = variant_weights_df.drop_duplicates()
        self.save_data(variant_weights_df, "variant_weights")
if __name__ == "__main__":
    # Instantiate the scraper and run the full scrape-and-save pipeline.
    scraper = mechScraper()
    scraper.main()