-
Notifications
You must be signed in to change notification settings - Fork 0
Expand file tree
/
Copy pathconstructScraper.py
More file actions
153 lines (108 loc) · 4.93 KB
/
constructScraper.py
File metadata and controls
153 lines (108 loc) · 4.93 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
import json
import requests
import sys
from bs4 import BeautifulSoup
from collections import OrderedDict
# A simple script to find files for online Construct games
class constructScrape():
    """Collect downloadable asset URLs for an online Construct game embed.

    Instantiating the class runs the whole scrape: fetch the embed page,
    locate the game's data/offline manifest, extract every asset URL it
    can find, and append the deduplicated list (spaces percent-encoded)
    to 'constructAssets.txt'.
    """

    # Take lists in the data files and flatten them so they're easier to iterate through
    def flatten(self, dataList):
        """Recursively yield the leaf (non-list) values of nested lists."""
        if isinstance(dataList, list):
            for item in dataList:
                yield from self.flatten(item)
        else:
            yield dataList

    # extract files from data.json or data.js
    def dataFileExtraction(self, baseUrl, dataFile):
        """Extract asset URLs referenced by a Construct data.json/data.js file.

        Returns a list of absolute URLs built from baseUrl.
        """
        print('[] Extracting urls from main data file')
        fileList = []
        fileTypes = ('.png', '.jpg', '.otf')
        # endswith() replaces the original x[-4:] slice comparison, which
        # could never match the 5-character '.webm' extension.
        mediaTypes = ('.ogg', '.m4a', '.web', '.webm')
        dataContent = requests.get(baseUrl + dataFile).json()['project'] or None
        flatData = list(self.flatten(dataContent))
        # NOTE(review): index 8 appears to hold the media directory name in
        # the exported project array — confirm against the Construct version.
        mediaDir = dataContent[8]
        for i, entry in enumerate(flatData):
            if not isinstance(entry, str):
                continue
            if entry in mediaTypes:
                # A bare extension string: the matching base file name sits
                # two entries earlier in the flattened stream.
                fileList.append(baseUrl + mediaDir + flatData[i - 2] + entry)
            elif entry.endswith(fileTypes):
                fileList.append(baseUrl + entry)
            elif entry.endswith(mediaTypes):
                fileList.append(baseUrl + mediaDir + entry)
        # Fixed runtime files every exported game ships with.
        for staticPath in ('style.css', 'scripts/main.js', 'scripts/c3runtime.js',
                           'scripts/jobworker.js', 'scripts/dispatchworker.js'):
            fileList.append(baseUrl + staticPath)
        manifestData = requests.get(baseUrl + 'appmanifest.json').json()
        for icon in manifestData['icons']:
            fileList.append(baseUrl + icon['src'])
        return fileList

    # extract files from offline.json or offline.js
    def offlineFileExtraction(self, baseUrl, dataFile):
        """Extract asset URLs listed in an offline.json/offline.js manifest.

        Returns a (possibly empty) list of absolute URLs.
        """
        print('[] Extracting urls from offline data file')
        fileList = []
        # requests.Response is falsy for HTTP error statuses, so this guard
        # skips extraction when the manifest could not be fetched.
        response = requests.get(baseUrl + dataFile) or None
        if response:
            for path in response.json()['fileList']:
                fileList.append(baseUrl + path)
        else:
            print('! The data file content could not be loaded')
        return fileList

    # Find the file containing the asset information
    def findDataFile(self, subUrl):
        """Probe candidate manifest names; return the first that answers 200.

        Returns the file name, or None when no candidate exists.
        """
        for candidate in ('offline.json', 'offline.js', 'data.json', 'data.js'):
            if requests.get(subUrl + candidate).status_code == 200:
                print('[] The existing file is ' + candidate)
                return candidate
        print('! Asset file could not be found')
        return None

    def __init__(self, embedUrl):
        """Run the full scrape for embedUrl and append the URLs found to constructAssets.txt."""
        print('[] Starting extraction for: ' + embedUrl)
        downloadUrls = []
        subUrl = embedUrl
        if subUrl[-1] != '/':
            # Strip the page name so subUrl is the directory the assets live in.
            subUrl = embedUrl.rsplit('/', 1)[0] + '/'
        # get embed html data
        print('[] Getting url data')
        embedData = requests.get(embedUrl)
        embedSoup = BeautifulSoup(embedData.content, 'html.parser')
        downloadUrls.append(embedUrl)
        downloadUrls.append(subUrl + 'appmanifest.json')
        downloadUrls.append(subUrl + 'sw.js')
        # Grab every <script src=...> referenced by the embed page.
        for script in embedSoup.findAll('script'):
            if 'src' in script.attrs:
                src = script['src']
                downloadUrls.append(src if src[:4] == 'http' else subUrl + src)
        dataFile = self.findDataFile(subUrl)
        # BUG FIX: the original crashed with a TypeError (None + str /
        # None[:7]) when no manifest was found; bail out gracefully instead.
        if dataFile is None:
            return
        downloadUrls.append(subUrl + dataFile)
        # Workflow to extract files from 'offline' or 'data' files
        if dataFile.startswith('offline'):
            downloadUrls += self.offlineFileExtraction(subUrl, dataFile)
        elif dataFile.startswith('data'):
            downloadUrls += self.dataFileExtraction(subUrl, dataFile)
        # dict.fromkeys() deduplicates while preserving insertion order.
        uniqueUrls = list(dict.fromkeys(downloadUrls))
        # Context manager guarantees the handle is closed even on error.
        with open('constructAssets.txt', 'a') as saveFile:
            for url in uniqueUrls:
                saveFile.write(url.replace(' ', '%20'))
                saveFile.write("\n")
        # Report the deduplicated count actually written (the original
        # reported the pre-dedup list length).
        print('[] Wrote ' + str(len(uniqueUrls)) + ' files!')
# Run only when executed as a script, so importing this module stays
# side-effect free.
if __name__ == '__main__':
    if len(sys.argv) < 2:
        # BUG FIX: usage line previously named the wrong file
        # ('constructScrape.py' instead of 'constructScraper.py').
        print('Usage: python constructScraper.py [embedUrl]')
    else:
        constructScrape(sys.argv[1])