-
Notifications
You must be signed in to change notification settings - Fork 0
Expand file tree
/
Copy pathscraper_authenticated.py
More file actions
767 lines (649 loc) · 34 KB
/
scraper_authenticated.py
File metadata and controls
767 lines (649 loc) · 34 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454
455
456
457
458
459
460
461
462
463
464
465
466
467
468
469
470
471
472
473
474
475
476
477
478
479
480
481
482
483
484
485
486
487
488
489
490
491
492
493
494
495
496
497
498
499
500
501
502
503
504
505
506
507
508
509
510
511
512
513
514
515
516
517
518
519
520
521
522
523
524
525
526
527
528
529
530
531
532
533
534
535
536
537
538
539
540
541
542
543
544
545
546
547
548
549
550
551
552
553
554
555
556
557
558
559
560
561
562
563
564
565
566
567
568
569
570
571
572
573
574
575
576
577
578
579
580
581
582
583
584
585
586
587
588
589
590
591
592
593
594
595
596
597
598
599
600
601
602
603
604
605
606
607
608
609
610
611
612
613
614
615
616
617
618
619
620
621
622
623
624
625
626
627
628
629
630
631
632
633
634
635
636
637
638
639
640
641
642
643
644
645
646
647
648
649
650
651
652
653
654
655
656
657
658
659
660
661
662
663
664
665
666
667
668
669
670
671
672
673
674
675
676
677
678
679
680
681
682
683
684
685
686
687
688
689
690
691
692
693
694
695
696
697
698
699
700
701
702
703
704
705
706
707
708
709
710
711
712
713
714
715
716
717
718
719
720
721
722
723
724
725
726
727
728
729
730
731
732
733
734
735
736
737
738
739
740
741
742
743
744
745
746
747
748
749
750
751
752
753
754
755
756
757
758
759
760
761
762
763
764
765
766
767
#!/usr/bin/env python3
"""
Authenticated LinkedIn Scraper
Handles LinkedIn login and authenticated scraping to bypass security restrictions.
"""
import os
import json
import time
from typing import Dict, Optional
from bs4 import BeautifulSoup
from langchain_core.tools import Tool
from dotenv import load_dotenv
load_dotenv()
# LinkedIn credentials from environment
LINKEDIN_EMAIL = os.environ.get("LINKEDIN_EMAIL")
LINKEDIN_PASSWORD = os.environ.get("LINKEDIN_PASSWORD")
class AuthenticatedLinkedInScraper:
"""
LinkedIn scraper with authentication support
"""
def __init__(self):
self.session = None
self.is_logged_in = False
def _setup_playwright_session(self):
"""Setup Playwright with persistent session"""
try:
from playwright.sync_api import sync_playwright
# Clear any existing session to start fresh
user_data_dir = os.path.abspath(".playwright/linkedin_session")
if os.path.exists(user_data_dir):
import shutil
try:
shutil.rmtree(user_data_dir)
print("🗑️ Cleared existing session cache")
except:
pass
os.makedirs(user_data_dir, exist_ok=True)
self.playwright = sync_playwright().start()
self.browser = self.playwright.chromium.launch_persistent_context(
user_data_dir,
headless=False, # Keep visible for manual login if needed
args=[
"--disable-blink-features=AutomationControlled",
"--disable-web-security",
"--disable-features=VizDisplayCompositor",
"--disable-dev-shm-usage",
"--no-sandbox"
]
)
if len(self.browser.pages) == 0:
self.page = self.browser.new_page()
else:
self.page = self.browser.pages[0]
return True
except ImportError:
print("❌ Playwright not available. Install with: pip install playwright")
return False
except Exception as e:
print(f"❌ Failed to setup Playwright: {e}")
return False
def _check_login_status(self) -> bool:
"""Check if already logged into LinkedIn"""
try:
self.page.goto("https://www.linkedin.com/feed/", wait_until="domcontentloaded", timeout=10000)
# Check if we're on the feed page (logged in) or redirected to login
current_url = self.page.url
if "feed" in current_url or "mynetwork" in current_url:
print("✅ Already logged into LinkedIn")
return True
else:
print("❌ Not logged into LinkedIn")
return False
except Exception as e:
print(f"❌ Error checking login status: {e}")
return False
def _perform_login(self) -> bool:
"""Perform LinkedIn login with enhanced automation"""
if not LINKEDIN_EMAIL or not LINKEDIN_PASSWORD:
print("⚠️ LinkedIn credentials not found in environment variables")
print("💡 Please add LINKEDIN_EMAIL and LINKEDIN_PASSWORD to your .env file")
print("🔧 Or login manually in the browser window that opens")
return self._manual_login_prompt()
try:
print(f"🔐 Attempting automatic LinkedIn login for: {LINKEDIN_EMAIL}")
# Go to LinkedIn login page
self.page.goto("https://www.linkedin.com/login", wait_until="domcontentloaded")
# Use the new credential filling method
return self._fill_credentials_and_login()
except Exception as e:
print(f"❌ Login error: {e}")
print("🔄 Falling back to manual login...")
return self._manual_login_prompt()
def _handle_login_challenges(self) -> bool:
"""Handle 2FA, CAPTCHA, and other login challenges"""
try:
# Check for 2FA challenge
if self.page.is_visible('input[name="pin"]', timeout=5000):
print("🔐 2FA challenge detected")
print("📱 Please enter the 2FA code from your device...")
# Wait for user to enter 2FA code
self.page.wait_for_function(
"document.querySelector('input[name=\"pin\"]').value.length >= 6",
timeout=60000 # 1 minute timeout
)
# Click continue button
continue_btn = self.page.locator('button[type="submit"]')
if continue_btn.is_visible():
continue_btn.click()
self.page.wait_for_load_state("networkidle", timeout=15000)
return True
# Check for email verification
if "verify" in self.page.url.lower() or self.page.is_visible('text="verify"', timeout=3000):
print("📧 Email verification required")
print("💡 Please check your email and click the verification link")
print("⏳ Waiting for verification...")
# Wait for user to complete email verification
input("Press Enter after completing email verification...")
self.page.reload()
self.page.wait_for_load_state("networkidle", timeout=15000)
return True
# Check for CAPTCHA
if self.page.is_visible('[data-test-id="captcha"]', timeout=3000) or "captcha" in self.page.content().lower():
print("🤖 CAPTCHA detected")
print("🧩 Please solve the CAPTCHA manually...")
# Wait for CAPTCHA to be solved
input("Press Enter after solving the CAPTCHA...")
return True
return False
except Exception as e:
print(f"⚠️ Challenge handling error: {e}")
return False
def _fill_credentials_and_login(self) -> bool:
"""Automatically fill credentials and login from current page"""
if not LINKEDIN_EMAIL or not LINKEDIN_PASSWORD:
print("❌ LinkedIn credentials not found in environment variables")
return False
try:
print("📧 Filling in email...")
# Wait for email field and fill it
self.page.wait_for_selector('input[name="session_key"]', timeout=10000)
self.page.fill('input[name="session_key"]', "")
self.page.type('input[name="session_key"]', LINKEDIN_EMAIL, delay=100)
print("🔑 Filling in password...")
# Fill password
self.page.fill('input[name="session_password"]', "")
self.page.type('input[name="session_password"]', LINKEDIN_PASSWORD, delay=100)
# Add small delay to simulate human behavior
print("⏳ Simulating human behavior...")
self.page.wait_for_timeout(2000)
# Click login button
print("🖱️ Clicking login button...")
login_button = self.page.locator('button[type="submit"]')
login_button.click()
# Wait for navigation
print("⏳ Waiting for login to complete...")
self.page.wait_for_load_state("networkidle", timeout=20000)
# Check for 2FA or verification challenges
if self._handle_login_challenges():
print("🔐 Handled login challenges successfully")
# Check if login was successful
if self._check_login_status():
print("✅ Automatic login successful!")
return True
else:
print("❌ Automatic login failed")
return False
except Exception as e:
print(f"❌ Error during credential filling: {e}")
return False
def _manual_login_prompt(self) -> bool:
"""Prompt user to login manually"""
print("\n" + "="*60)
print("🔐 MANUAL LOGIN REQUIRED")
print("="*60)
print("1. A browser window should be open")
print("2. Please login to LinkedIn manually")
print("3. Once logged in, press Enter to continue...")
print("4. Or type 'skip' to skip authentication")
print("="*60)
user_input = input("Press Enter when logged in (or 'skip'): ").strip().lower()
if user_input == 'skip':
print("⚠️ Skipping authentication - limited functionality")
return False
# Check if login was successful
return self._check_login_status()
def scrape_profile(self, linkedin_url: str) -> Dict:
"""
Scrape LinkedIn profile with authentication - Enhanced with automatic login handling
"""
print("🗑️ Starting fresh scraping session...")
# Always start with a fresh session
if not self._setup_playwright_session():
return self._fallback_scraping(linkedin_url)
try:
# First, ensure we're logged in
print("🔐 Checking login status...")
if not self._check_login_status():
print("❌ Not logged in, attempting automatic login...")
if not self._perform_login():
print("❌ Automatic login failed, trying manual login...")
if not self._manual_login_prompt():
print("⚠️ Proceeding without authentication - may have limited access")
# Navigate to the profile with enhanced retry logic
print(f"🔍 Navigating to: {linkedin_url}")
# Try multiple navigation attempts with automatic login handling
for attempt in range(5): # Increased attempts
try:
print(f"🔄 Attempt {attempt + 1}: Navigating to profile...")
self.page.goto(linkedin_url, wait_until="domcontentloaded", timeout=30000)
# Wait for content to load
self.page.wait_for_timeout(3000)
# Check current page status
page_content = self.page.content()
page_title = self.page.title().lower()
current_url = self.page.url
print(f"📍 Current URL: {current_url}")
print(f"📄 Page title: {page_title[:50]}...")
# Scenario 1: Redirected to login page
if "login" in current_url.lower() or "sign in" in page_title or "join" in page_title:
print(f"⚠️ Attempt {attempt + 1}: Redirected to login page")
print("🔐 Automatically filling credentials and logging in...")
# Fill credentials and login
if self._fill_credentials_and_login():
print("✅ Login successful, retrying profile access...")
# After successful login, try to navigate to the profile again
self.page.goto(linkedin_url, wait_until="domcontentloaded", timeout=30000)
self.page.wait_for_timeout(3000)
continue
else:
print("❌ Login failed, trying next attempt...")
continue
# Scenario 2: Security verification page
elif "security verification" in page_content.lower() or "verify" in page_title or "authwall" in current_url.lower():
print(f"⚠️ Attempt {attempt + 1}: Encountered security verification")
# Try different bypass strategies
if attempt < 3:
print("🔄 Trying different navigation approach...")
# Try without www or with different parameters
alt_url = linkedin_url.replace('www.', '') if 'www.' in linkedin_url else linkedin_url + '?trk=public_profile_browsemap'
self.page.goto(alt_url, wait_until="domcontentloaded", timeout=15000)
self.page.wait_for_timeout(3000)
continue
else:
print("🔐 Trying to handle security verification...")
return self._handle_security_verification(linkedin_url)
# Scenario 3: Successfully loaded profile
elif "profile" in current_url.lower() or "in/" in current_url.lower():
print(f"✅ Successfully loaded profile page on attempt {attempt + 1}")
break
# Scenario 4: Other pages (feed, home, etc.)
else:
print(f"⚠️ Attempt {attempt + 1}: Unexpected page, trying to navigate directly...")
# Try direct navigation with different approach
if attempt < 3:
self.page.goto(linkedin_url, wait_until="domcontentloaded", timeout=15000)
continue
else:
print("❌ Unable to reach profile page")
return self._fallback_scraping(linkedin_url)
except Exception as e:
print(f"❌ Navigation attempt {attempt + 1} failed: {e}")
if attempt < 4:
print("🔄 Waiting before retry...")
self.page.wait_for_timeout(3000)
continue
else:
print("❌ All navigation attempts failed")
raise e
# Extract profile data
print("🔍 Extracting profile data...")
profile_data = self._extract_profile_data()
# Clean up
self._cleanup()
return profile_data
except Exception as e:
print(f"❌ Scraping error: {e}")
self._cleanup()
return self._fallback_scraping(linkedin_url)
def _extract_profile_data(self) -> Dict:
"""Extract REAL profile data from the current page with enhanced selectors"""
try:
# Wait for dynamic content to load
self.page.wait_for_timeout(3000)
# Scroll to trigger lazy loading
self.page.evaluate("window.scrollTo(0, document.body.scrollHeight/3)")
self.page.wait_for_timeout(1000)
self.page.evaluate("window.scrollTo(0, 0)")
self.page.wait_for_timeout(1000)
# Get page content
html = self.page.content()
soup = BeautifulSoup(html, 'html.parser')
print("🔍 Extracting REAL profile data...")
profile_data = {}
# Enhanced name extraction with more selectors
name_selectors = [
'h1.text-heading-xlarge',
'h1.break-words',
'.pv-text-details__left-panel h1',
'.ph5 h1',
'h1[data-generated-suggestion-target]',
'.pv-top-card--list h1',
'.top-card-layout__title',
'h1.top-card-layout__title',
'.pv-entity__summary-info h1',
'h1.inline.t-24.v-align-middle.break-words'
]
name_found = False
for selector in name_selectors:
try:
name_elem = soup.select_one(selector)
if name_elem and name_elem.get_text(strip=True):
name_text = name_elem.get_text(strip=True)
if name_text and name_text not in ['LinkedIn', 'Sign Up', 'Join LinkedIn']:
profile_data['full_name'] = name_text
name_found = True
print(f"✅ Found name with selector '{selector}': {name_text}")
break
except Exception as e:
continue
# Try Playwright selectors if BeautifulSoup fails
if not name_found:
try:
playwright_name_selectors = [
'h1.text-heading-xlarge',
'h1.break-words',
'.pv-text-details__left-panel h1',
'h1'
]
for selector in playwright_name_selectors:
try:
element = self.page.locator(selector).first
if element.is_visible():
name_text = element.inner_text().strip()
if name_text and name_text not in ['LinkedIn', 'Sign Up', 'Join LinkedIn']:
profile_data['full_name'] = name_text
name_found = True
print(f"✅ Found name with Playwright selector '{selector}': {name_text}")
break
except:
continue
except Exception as e:
print(f"⚠️ Playwright name extraction failed: {e}")
# Enhanced headline extraction
headline_selectors = [
'.text-body-medium.break-words',
'.pv-text-details__left-panel .text-body-medium',
'.ph5 .text-body-medium',
'.top-card-layout__headline',
'.pv-top-card--list .text-body-medium',
'.pv-entity__summary-info .text-body-medium',
'div.text-body-medium.break-words'
]
headline_found = False
for selector in headline_selectors:
try:
headline_elem = soup.select_one(selector)
if headline_elem and headline_elem.get_text(strip=True):
headline_text = headline_elem.get_text(strip=True)
if headline_text and headline_text not in ['Professional', 'LinkedIn Member']:
profile_data['headline'] = headline_text
headline_found = True
print(f"✅ Found headline with selector '{selector}': {headline_text[:50]}...")
break
except Exception as e:
continue
# Try Playwright for headline if BeautifulSoup fails
if not headline_found:
try:
playwright_headline_selectors = [
'.text-body-medium.break-words',
'.pv-text-details__left-panel .text-body-medium'
]
for selector in playwright_headline_selectors:
try:
element = self.page.locator(selector).first
if element.is_visible():
headline_text = element.inner_text().strip()
if headline_text and headline_text not in ['Professional', 'LinkedIn Member']:
profile_data['headline'] = headline_text
headline_found = True
print(f"✅ Found headline with Playwright selector '{selector}': {headline_text[:50]}...")
break
except:
continue
except Exception as e:
print(f"⚠️ Playwright headline extraction failed: {e}")
# Enhanced profile picture extraction
img_selectors = [
'.pv-top-card-profile-picture__image',
'.profile-photo-edit__preview',
'img.pv-top-card-profile-picture__image--show',
'.top-card-layout__entity-image img',
'.pv-top-card--list img',
'img[data-delayed-url]'
]
for selector in img_selectors:
try:
img_elem = soup.select_one(selector)
if img_elem:
img_src = img_elem.get('src') or img_elem.get('data-delayed-url')
if img_src and 'linkedin' in img_src:
profile_data['profile_pic_url'] = img_src
print(f"✅ Found profile picture: {img_src[:50]}...")
break
except Exception as e:
continue
# Enhanced about/summary extraction
about_selectors = [
'.pv-shared-text-with-see-more',
'.inline-show-more-text',
'.pv-about-section',
'.pv-about__summary-text',
'.pv-oc .pv-about-section',
'section[data-section="summary"] .pv-about__summary-text'
]
for selector in about_selectors:
try:
about_elem = soup.select_one(selector)
if about_elem and about_elem.get_text(strip=True):
summary_text = about_elem.get_text(strip=True)
if len(summary_text) > 10: # Ensure it's meaningful content
profile_data['summary'] = summary_text
print(f"✅ Found summary: {summary_text[:100]}...")
break
except Exception as e:
continue
# Extract additional information
try:
# Location
location_selectors = [
'.pv-text-details__left-panel .text-body-small',
'.top-card-layout__first-subline',
'.pv-top-card--list .text-body-small'
]
for selector in location_selectors:
try:
location_elem = soup.select_one(selector)
if location_elem and location_elem.get_text(strip=True):
location_text = location_elem.get_text(strip=True)
if location_text and 'connection' not in location_text.lower():
profile_data['location'] = location_text
print(f"✅ Found location: {location_text}")
break
except:
continue
except Exception as e:
print(f"⚠️ Location extraction failed: {e}")
# Set intelligent defaults
if not profile_data.get('full_name'):
# Try to extract from page title as last resort
title = soup.find('title')
if title:
title_text = title.get_text().replace(' | LinkedIn', '').strip()
if title_text and title_text not in ['LinkedIn', 'Sign Up']:
profile_data['full_name'] = title_text
print(f"✅ Extracted name from title: {title_text}")
else:
profile_data['full_name'] = 'LinkedIn Profile'
else:
profile_data['full_name'] = 'LinkedIn Profile'
if not profile_data.get('headline'):
profile_data['headline'] = 'LinkedIn Professional'
if not profile_data.get('summary'):
profile_data['summary'] = profile_data.get('headline', 'LinkedIn Professional')
if not profile_data.get('profile_pic_url'):
profile_data['profile_pic_url'] = ''
print(f"✅ Successfully extracted REAL profile data:")
print(f" Name: {profile_data['full_name']}")
print(f" Headline: {profile_data['headline'][:50]}...")
print(f" Has Summary: {len(profile_data.get('summary', '')) > 20}")
print(f" Has Picture: {bool(profile_data.get('profile_pic_url'))}")
return profile_data
except Exception as e:
print(f"❌ Error extracting profile data: {e}")
return {
'full_name': 'Profile Extraction Failed',
'headline': 'Unable to extract profile data',
'summary': f'Error extracting profile data: {str(e)}',
'profile_pic_url': ''
}
def _handle_security_verification(self, linkedin_url: str) -> Dict:
"""Enhanced security verification handling with multiple strategies"""
print("🔐 Handling security verification with advanced techniques...")
try:
# Strategy 1: Try different navigation approaches
navigation_strategies = [
lambda: self.page.goto(linkedin_url.replace('www.', ''), wait_until="domcontentloaded"),
lambda: self.page.goto(linkedin_url + '?trk=public_profile_browsemap', wait_until="domcontentloaded"),
lambda: self.page.goto(linkedin_url.replace('https://www.', 'https://'), wait_until="domcontentloaded"),
]
for i, strategy in enumerate(navigation_strategies, 1):
try:
print(f"🔄 Trying navigation strategy {i}...")
strategy()
self.page.wait_for_timeout(3000)
# Check if we bypassed security
page_content = self.page.content().lower()
if "security verification" not in page_content and "sign up" not in page_content:
print(f"✅ Strategy {i} successful - bypassed security!")
return self._extract_profile_data()
except Exception as e:
print(f"❌ Strategy {i} failed: {e}")
continue
# Strategy 2: Try to interact with security page elements
print("🔐 Trying to interact with security verification elements...")
# Look for various continue/proceed buttons
continue_selectors = [
'button[data-test-id="continue-button"]',
'button:has-text("Continue")',
'button:has-text("Proceed")',
'button[type="submit"]',
'.challenge-form button',
'[data-test-id="challenge-form"] button'
]
for selector in continue_selectors:
try:
if self.page.is_visible(selector, timeout=2000):
print(f"🔘 Found button: {selector}")
self.page.click(selector)
self.page.wait_for_load_state("networkidle", timeout=10000)
# Check if successful
page_content = self.page.content().lower()
if "security verification" not in page_content:
print("✅ Successfully bypassed security verification!")
return self._extract_profile_data()
except Exception as e:
continue
# Strategy 3: Try to extract what we can from the current page
print("🔍 Attempting to extract available data from security page...")
# Sometimes LinkedIn shows partial data even on security pages
try:
# Try to get basic info from meta tags or page title
title = self.page.title()
if title and '|' in title:
name_part = title.split('|')[0].strip()
if name_part and name_part not in ['LinkedIn', 'Sign Up', 'Security']:
print(f"✅ Extracted name from title: {name_part}")
# Try to get more info from meta tags
og_description = self.page.locator('meta[property="og:description"]').get_attribute('content')
return {
'full_name': name_part,
'headline': og_description or 'LinkedIn Professional',
'summary': og_description or f'{name_part} is a LinkedIn professional.',
'profile_pic_url': '',
'security_partial': True
}
except Exception as e:
print(f"⚠️ Could not extract partial data: {e}")
# Strategy 4: Return informative response about security block
print("❌ All security bypass strategies failed")
return {
'full_name': 'Profile Access Restricted',
'headline': 'LinkedIn Security Verification Required',
'summary': 'This LinkedIn profile is protected by security verification. The profile exists but requires additional verification to access full details.',
'profile_pic_url': '',
'security_blocked': True,
'url_attempted': linkedin_url
}
except Exception as e:
print(f"❌ Security verification handling failed: {e}")
return {
'full_name': 'Security Verification Error',
'headline': 'Unable to Handle Security Check',
'summary': f'Encountered security verification that could not be bypassed: {str(e)}',
'profile_pic_url': '',
'security_blocked': True,
'error': str(e)
}
def _fallback_scraping(self, linkedin_url: str) -> Dict:
"""Fallback scraping method without authentication"""
print("🔄 Trying fallback scraping method...")
try:
import requests
from fake_useragent import UserAgent
ua = UserAgent()
headers = {
'User-Agent': ua.random,
'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
'Accept-Language': 'en-US,en;q=0.5',
'Accept-Encoding': 'gzip, deflate',
'Connection': 'keep-alive',
}
response = requests.get(linkedin_url, headers=headers, timeout=10)
soup = BeautifulSoup(response.content, 'html.parser')
# Extract basic info from public view
title = soup.find('title')
if title:
title_text = title.get_text()
if '| LinkedIn' in title_text:
name = title_text.replace('| LinkedIn', '').strip()
return {
'full_name': name,
'headline': 'LinkedIn Professional',
'summary': f'Public LinkedIn profile for {name}',
'profile_pic_url': '',
'public_only': True
}
except Exception as e:
print(f"❌ Fallback scraping failed: {e}")
return {
'full_name': 'Profile Not Accessible',
'headline': 'LinkedIn Authentication Required',
'summary': 'This LinkedIn profile requires authentication to access. Please set up LinkedIn credentials.',
'profile_pic_url': '',
'auth_required': True
}
def _cleanup(self):
"""Clean up browser resources"""
try:
if hasattr(self, 'browser'):
self.browser.close()
if hasattr(self, 'playwright'):
self.playwright.stop()
except:
pass
def scrape_linkedin_authenticated(linkedin_url: str) -> Dict:
"""
Scrape LinkedIn profile with authentication support
Creates a fresh scraper instance for each request to avoid cache issues
"""
# Create a fresh scraper instance for each request
scraper = AuthenticatedLinkedInScraper()
return scraper.scrape_profile(linkedin_url)
# LangChain tool
authenticated_scraper_tool = Tool(
name="Authenticated LinkedIn Scraper",
func=scrape_linkedin_authenticated,
description="Advanced LinkedIn scraper with authentication support to bypass security restrictions"
)
# Test function
def test_authenticated_scraper():
"""Test the authenticated scraper"""
test_urls = [
"https://www.linkedin.com/in/liveankit",
"https://www.linkedin.com/in/williamhgates/",
"https://www.linkedin.com/in/satyanadella/"
]
print("🧪 Testing Authenticated LinkedIn Scraper")
print("=" * 50)
for url in test_urls:
print(f"\n🔍 Testing: {url}")
try:
result = scrape_linkedin_authenticated(url)
print(f"✅ Success! Name: {result.get('full_name', 'N/A')}")
print(f" Headline: {result.get('headline', 'N/A')}")
except Exception as e:
print(f"❌ Failed: {e}")
if __name__ == "__main__":
test_authenticated_scraper()