-
Notifications
You must be signed in to change notification settings - Fork 0
Expand file tree
/
Copy path@WebScan.py
More file actions
108 lines (96 loc) · 3.79 KB
/
@WebScan.py
File metadata and controls
108 lines (96 loc) · 3.79 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
import json
import os
import sys
import time
from urllib.parse import urljoin

import requests
from bs4 import BeautifulSoup
def animated(text):
    """Write *text* to stdout one character at a time (typewriter effect).

    Each character is flushed immediately and followed by a 10 ms pause.
    """
    for ch in text:
        sys.stdout.write(ch)
        sys.stdout.flush()
        time.sleep(0.01)
# ---- Start-up banner and login gate ----
logo = '''
 __ __ ___. _________
/ \ / \ ____\_ |__ / _____/_____ ___.__.
\ \/\/ // __ \| __ \ \_____ \\____ < | |
\ /\ ___/| \_\ \/ \ |_> >___ |
\__/\ / \___ >___ /_______ / __// ____|
\/ \/ \/ \/|__| \/
'''
animated(logo)
print(" »»»»Coder By White_Devil«««« ")

# NOTE(security): hard-coded demo credentials. A real tool should load
# these from configuration and read the password with getpass, not input().
username = "Admin"
password = "Kali1234"

givenUsername = input(" Enter Your username: ")
if givenUsername == username:
    print(" Correct Username ")
    givenPassword = input(" Enter Your Password: ")
    if givenPassword == password:
        print(" Correct Password ")
        # Typo fixed ("sucess") and message moved inside the success branch:
        # the original printed it unconditionally, even after a failed login.
        print('Login success')
    else:
        # Bug fix: the original printed nothing on a wrong password and
        # fell through to the scanner as if the login had succeeded.
        print(" Wrong Password ")
        sys.exit(1)
else:
    # Bug fix: the original printed "Wrong Username" but still continued
    # into the scanner (auth bypass). Exit instead.
    print(" Wrong Username ")
    sys.exit(1)
# ---- Scanner-stage banner ----
scanner_logo = '''
▓█████▄ ▓█████ ██▒ █▓ ██▓ ██▓
▒██▀ ██▌▓█ ▀▓██░ █▒▓██▒▓██▒
░██ █▌▒███ ▓██ █▒░▒██▒▒██░
░▓█▄ ▌▒▓█ ▄ ▒██ █░░░██░▒██░
░▒████▓ ░▒████▒ ▒▀█░ ░██░░██████▒
▒▒▓ ▒ ░░ ▒░ ░ ░ ▐░ ░▓ ░ ▒░▓ ░
░ ▒ ▒ ░ ░ ░ ░ ░░ ▒ ░░ ░ ▒ ░
░ ░ ░ ░ ░░ ▒ ░ ░ ░
░ ░ ░ ░ ░ ░ ░
░ ░
'''
animated(scanner_logo)
print('»»»Scripted By:@White_Devil«««')
def get_website_info(url):
    """Fetch *url* and print, as indented JSON, every link found on the page.

    Each entry in the printed list is a dict with the link's absolute 'url'
    and its visible 'text'. On a non-200 response a status message is printed;
    on any other error an explanatory message is printed. Returns None.
    """
    try:
        # Bounded timeout so a dead or slow host cannot hang the scanner
        # forever (the original call had no timeout at all).
        response = requests.get(url, timeout=10)
        if response.status_code == 200:
            # Parse the HTML body and collect every anchor tag.
            soup = BeautifulSoup(response.content, 'html.parser')
            website_info = []
            for link in soup.find_all('a'):
                href = link.get('href')
                if href:
                    # urljoin correctly resolves relative paths, '../',
                    # '//host' and already-absolute URLs. The original
                    # naive `url + href` concatenation produced broken
                    # URLs (missing or doubled slashes).
                    website_info.append({
                        'url': urljoin(url, href),
                        'text': link.text,
                    })
            print(json.dumps(website_info, indent=2))
        else:
            print('Failed to retrieve the website information. Status code: {}'.format(response.status_code))
    except Exception as e:
        # Broad catch kept deliberately: this is a best-effort scanner and
        # parse errors should be reported, not crash the run.
        print('An error occurred: {}'.format(e))
# Demo scan targets. Guarded so importing this module does not fire network
# requests. The original file also repeated the bare name `get_website_info`
# five times; those expressions evaluated the function object and discarded
# it (pure no-ops), so they are removed.
if __name__ == "__main__":
    get_website_info('https://ciaindia.org.in/')
    get_website_info('https://facebook.com/')
    get_website_info('https://www.expressvpn.com/blog/best-onion-sites-on-dark-web/')