-
Notifications
You must be signed in to change notification settings - Fork 0
Expand file tree
/
Copy pathscrape.py
More file actions
56 lines (43 loc) · 1.54 KB
/
scrape.py
File metadata and controls
56 lines (43 loc) · 1.54 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
import selenium.webdriver as webdriver
from selenium.webdriver.chrome.service import Service
from bs4 import BeautifulSoup
import time
def scrape_website(website, driver_path="./chromedriver.exe", wait=0.2):
    """Fetch the rendered HTML of *website* using a local Chrome driver.

    Args:
        website: URL to load.
        driver_path: Path to the chromedriver binary (defaults to the
            bundled "./chromedriver.exe" for backward compatibility).
        wait: Seconds to pause after navigation so the page can render
            before the DOM is captured.

    Returns:
        The page source as an HTML string.

    The driver is always quit, even if navigation raises.
    """
    options = webdriver.ChromeOptions()
    driver = webdriver.Chrome(service=Service(driver_path), options=options)
    try:
        driver.get(website)
        # Fix: sleep BEFORE reading page_source — the original slept after
        # capturing the DOM, so the wait had no effect on the result.
        time.sleep(wait)
        return driver.page_source
    finally:
        driver.quit()
def extract_body_content(html_content):
    """Return the <body> element of *html_content* as an HTML string.

    Returns an empty string when the document has no (non-empty) body.
    """
    parsed = BeautifulSoup(html_content, "html.parser")
    body = parsed.body
    return str(body) if body else ""
def clean_body_content(html_content):
    """Extract the visible text from *html_content*.

    Removes <script> and <style> elements, then returns the remaining
    text with one stripped, non-empty line per output line.

    Args:
        html_content: An HTML fragment or document as a string.

    Returns:
        Newline-joined visible text with blank lines removed.
    """
    soup = BeautifulSoup(html_content, "html.parser")
    # Drop non-visible content so only human-readable text survives.
    for script_style in soup(["script", "style"]):
        script_style.extract()
    # Separate adjacent text nodes with newlines, then normalise:
    # strip each line and discard the empties.
    clean_content = soup.get_text(separator="\n")
    return "\n".join(
        line.strip() for line in clean_content.splitlines() if line.strip()
    )
def split_dom_content(dom_content, max_length=6000):
    """Split *dom_content* into consecutive chunks of at most *max_length* chars.

    The final chunk may be shorter; an empty input yields an empty list.
    """
    chunks = []
    start = 0
    total = len(dom_content)
    while start < total:
        chunks.append(dom_content[start:start + max_length])
        start += max_length
    return chunks