-
Notifications
You must be signed in to change notification settings - Fork 0
Expand file tree
/
Copy pathscraper.py
More file actions
49 lines (34 loc) · 1.37 KB
/
scraper.py
File metadata and controls
49 lines (34 loc) · 1.37 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
import requests
from bs4 import BeautifulSoup
def get_israeli_swimmers():
    """Scrape the names of Israeli Olympic swimmers from Wikipedia.

    Fetches the category page, extracts the member-page link texts from
    the "mw-pages" section, and filters out category/template links.

    Returns:
        list[str]: swimmer page names, or an empty list if the HTTP
        request does not return status 200.
    """
    url = "https://en.wikipedia.org/wiki/Category:Olympic_swimmers_for_Israel"
    # Wikipedia rejects the default `requests` User-Agent, so present a
    # browser-like one instead.
    headers = {
        "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.124 Safari/537.36"
    }
    print(f"Connecting to {url}...")
    # timeout= prevents the script from hanging forever on a stalled
    # connection (requests has NO default timeout).
    response = requests.get(url, headers=headers, timeout=30)
    # Anything other than 200 OK means we were blocked or the page moved.
    if response.status_code != 200:
        print(f"Error: Status Code {response.status_code}. Wikipedia blocked us.")
        return []
    soup = BeautifulSoup(response.text, "html.parser")
    names = []
    # The div with id="mw-pages" holds the category's member-page links.
    category_div = soup.find("div", {"id": "mw-pages"})
    if category_div:
        for link in category_div.find_all("a"):
            name = link.text
            # Skip navigation links to sub-categories and templates.
            if "Category:" not in name and "Template:" not in name:
                names.append(name)
    return names
if __name__ == "__main__":
    # Run the scrape once, report how many names came back, then dump
    # the list to a text file, one name per line.
    swimmer_names = get_israeli_swimmers()
    print(f"Found {len(swimmer_names)} swimmers!")
    with open("swimmers_list.txt", "w", encoding="utf-8") as out_file:
        out_file.writelines(f"{swimmer_name}\n" for swimmer_name in swimmer_names)
    print("✅ Successfully saved names to 'swimmers_list.txt'")