webcraw.py
import requests
from bs4 import BeautifulSoup
import argparse
from fake_useragent import UserAgent
from sys import exit
from rich.console import Console
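
# Usage (based on the flags defined below; the external-link file name
# follows the script's own 'other_<output>' convention):
#
#   python webcraw.py -u example.com -o links.txt
#   python webcraw.py -u https://example.com --silent --only-good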

def main():
    console = Console()
    args = argparse.ArgumentParser()
    args.add_argument('-u', '--url', type=str, help='url to scan', required=True)
    args.add_argument('-o', '--output', type=str, help='output file', default=None)
    args.add_argument('-s', '--silent', action='store_true', help='silent mode')
    args.add_argument('-g', '--only-good', action='store_true', help='only show good links')
    args = args.parse_args()

    # Pick a random User-Agent so requests look less like an automated client.
    ua = UserAgent()
    headers = {"User-Agent": ua.random}

    if not args.silent:
        hello(console)
    url = refactor_url(args.url)
    if not args.silent:
        print_info(console, f"URL: {url}")

    result = parse(url, headers)
    if result is None:
        print_error(console, f'Invalid URL {url}')
        exit(1)

    host = find_host(url)
    if not args.silent:
        print_info(console, f"Host: {host}")
        print_info(console, f"Fake User-Agent: {headers['User-Agent']}")

    queue = [url]      # BFS frontier of same-host pages still to fetch
    links_ = []        # same-host pages that returned HTTP 200
    other_links = []   # links pointing outside the target host
    visited = set()    # every URL seen so far, fetched or not
    try:
        while queue:
            current_page = queue.pop(0)
            if current_page in visited:
                continue
            visited.add(current_page)
            response = parse(current_page, headers)
            if response is None:
                print_error(console, current_page)
                continue
            links_.append(current_page)
            print_success(console, current_page)
            soup = BeautifulSoup(response, 'html.parser')
            for link_ in soup.find_all('a'):
                try:
                    link = link_['href']
                    # Strip leading slashes so relative paths join cleanly
                    # onto the normalized base URL (which ends in '/').
                    while link.startswith('/'):
                        link = link[1:]
                    if not link.startswith('http') and not link.startswith('www.'):
                        link = refactor_link(link, url)
                    if host not in link:
                        # External link: record it, but do not crawl it.
                        if link not in visited:
                            other_links.append(link)
                            visited.add(link)
                            if not args.only_good:
                                print_info(console, link)
                    elif link not in visited:
                        queue.append(link)
                except KeyError:
                    # <a> tag without an href attribute.
                    link_ = str(link_).replace('\n', '').strip()
                    if not args.only_good:
                        print_error(console, f"{link_} has no href")
    except KeyboardInterrupt:
        print_info(console, "Program terminated by user\n")
    if args.output:
        with open(args.output, 'w') as f:
            for link in set(links_):
                f.write(link + '\n')
        with open(f'other_{args.output}', 'w') as f:
            for link in other_links:
                f.write(link + '\n')

def parse(url: str, headers: dict):
    """Fetch a page and return its HTML on HTTP 200, otherwise None."""
    try:
        # timeout added so a single slow host cannot hang the whole crawl
        response = requests.get(url, headers=headers, timeout=10)
        if response.status_code == 200:
            return response.text
        return None
    except requests.exceptions.RequestException:
        return None

def find_host(url: str) -> str:
    """Return the bare hostname, e.g. 'example.com' for 'http://example.com/a/'."""
    url = url.replace('https://', '')
    url = url.replace('http://', '')
    return url.split('/')[0]

def refactor_url(url: str) -> str:
    """Normalize the start URL: single trailing slash, explicit scheme."""
    url = url.strip()
    # len() guard avoids an IndexError on one-character input.
    while len(url) > 1 and url[-1] == '/' and url[-2] == '/':
        url = url[:-1]
    if url[-1] != '/':
        url = url + '/'
    if not url.startswith("http"):
        url = f"http://{url}"
    return url

def refactor_link(link: str, url: str) -> str:
    """Turn a relative link into an absolute one under the start URL."""
    if url not in link:
        link = url + link
    return link

def print_error(console, message: str) -> None:
    console.print(f"[-] {message}", style="bold red")


def print_success(console, message: str) -> None:
    console.print(f"[+] {message}", style="bold green")


def print_info(console, message: str) -> None:
    console.print(f"[?] {message}", style="bold yellow")


def hello(console):
    # Plain string: the original used an f-string with no placeholders.
    console.print("[yellow]WEBCRAW\nmade by 7eliassen[/yellow]\n")


if __name__ == '__main__':
    main()
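
# Example session (illustrative; real output depends on the target site):
#
#   $ python webcraw.py -u example.com
#   WEBCRAW
#   made by 7eliassen
#
#   [?] URL: http://example.com/
#   [?] Host: example.com
#   [?] Fake User-Agent: Mozilla/5.0 ...
#   [+] http://example.com/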