-
Notifications
You must be signed in to change notification settings - Fork 0
Expand file tree
/
Copy pathtextanalysis.py
More file actions
1612 lines (1292 loc) · 71.9 KB
/
textanalysis.py
File metadata and controls
1612 lines (1292 loc) · 71.9 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454
455
456
457
458
459
460
461
462
463
464
465
466
467
468
469
470
471
472
473
474
475
476
477
478
479
480
481
482
483
484
485
486
487
488
489
490
491
492
493
494
495
496
497
498
499
500
501
502
503
504
505
506
507
508
509
510
511
512
513
514
515
516
517
518
519
520
521
522
523
524
525
526
527
528
529
530
531
532
533
534
535
536
537
538
539
540
541
542
543
544
545
546
547
548
549
550
551
552
553
554
555
556
557
558
559
560
561
562
563
564
565
566
567
568
569
570
571
572
573
574
575
576
577
578
579
580
581
582
583
584
585
586
587
588
589
590
591
592
593
594
595
596
597
598
599
600
601
602
603
604
605
606
607
608
609
610
611
612
613
614
615
616
617
618
619
620
621
622
623
624
625
626
627
628
629
630
631
632
633
634
635
636
637
638
639
640
641
642
643
644
645
646
647
648
649
650
651
652
653
654
655
656
657
658
659
660
661
662
663
664
665
666
667
668
669
670
671
672
673
674
675
676
677
678
679
680
681
682
683
684
685
686
687
688
689
690
691
692
693
694
695
696
697
698
699
700
701
702
703
704
705
706
707
708
709
710
711
712
713
714
715
716
717
718
719
720
721
722
723
724
725
726
727
728
729
730
731
732
733
734
735
736
737
738
739
740
741
742
743
744
745
746
747
748
749
750
751
752
753
754
755
756
757
758
759
760
761
762
763
764
765
766
767
768
769
770
771
772
773
774
775
776
777
778
779
780
781
782
783
784
785
786
787
788
789
790
791
792
793
794
795
796
797
798
799
800
801
802
803
804
805
806
807
808
809
810
811
812
813
814
815
816
817
818
819
820
821
822
823
824
825
826
827
828
829
830
831
832
833
834
835
836
837
838
839
840
841
842
843
844
845
846
847
848
849
850
851
852
853
854
855
856
857
858
859
860
861
862
863
864
865
866
867
868
869
870
871
872
873
874
875
876
877
878
879
880
881
882
883
884
885
886
887
888
889
890
891
892
893
894
895
896
897
898
899
900
901
902
903
904
905
906
907
908
909
910
911
912
913
914
915
916
917
918
919
920
921
922
923
924
925
926
927
928
929
930
931
932
933
934
935
936
937
938
939
940
941
942
943
944
945
946
947
948
949
950
951
952
953
954
955
956
957
958
959
960
961
962
963
964
965
966
967
968
969
970
971
972
973
974
975
976
977
978
979
980
981
982
983
984
985
986
987
988
989
990
991
992
993
994
995
996
997
998
999
1000
#!/usr/bin/env python3
import os
import sys
import logging
import json
import traceback
import pandas as pd
import numpy as np
import yfinance as yf
import requests
from bs4 import BeautifulSoup
from newspaper import Article
import newspaper
import pickle
from datetime import datetime, timedelta
from transformers import AutoTokenizer, AutoModel
import torch
from sklearn.decomposition import PCA
from sklearn.linear_model import Ridge
from sklearn.metrics import mean_squared_error, r2_score
from scipy.stats import pearsonr
import matplotlib.pyplot as plt
import nltk
from nltk.corpus import stopwords
import warnings
warnings.filterwarnings('ignore')
# Download required NLTK data
try:
nltk.download('stopwords', quiet=True)
nltk.download('punkt', quiet=True)
except:
pass
class TextAnalysisFinance:
"""
A comprehensive text analysis pipeline for financial news and stock return prediction.
This class handles the entire workflow from data collection to model evaluation,
including news scraping, price data fetching, text preprocessing, embedding extraction,
and return prediction using various configurations.
"""
def __init__(self, ticker="AAPL", base_dir=None):
"""
Initialize the TextAnalysisFinance pipeline.
Args:
ticker (str): Stock ticker symbol to analyze
base_dir (str): Base directory for data storage. If None, uses current directory.
"""
self.ticker = ticker
self.base_dir = base_dir or os.getcwd()
# Create directory structure
self.dirs = {
'logs': os.path.join(self.base_dir, 'logs'),
'rawdata': os.path.join(self.base_dir, 'rawdata'),
'data': os.path.join(self.base_dir, 'data'),
'figures': os.path.join(self.base_dir, 'figures')
}
for dir_path in self.dirs.values():
os.makedirs(dir_path, exist_ok=True)
# Setup logging
self.setup_logging()
# Initialize model components
self.tokenizer = None
self.model = None
self.stop_words = set(stopwords.words('english'))
self.logger.info(f"Initialized TextAnalysisFinance for ticker: {ticker}")
self.logger.info(f"Base directory: {self.base_dir}")
def setup_logging(self):
"""Setup logging configuration to capture all key outputs."""
log_file = os.path.join(self.dirs['logs'], 'run.log')
# Clear existing handlers
for handler in logging.root.handlers[:]:
logging.root.removeHandler(handler)
# Configure logging
logging.basicConfig(
level=logging.INFO,
format='%(asctime)s - %(levelname)s - %(message)s',
handlers=[
logging.FileHandler(log_file),
logging.StreamHandler(sys.stdout)
]
)
self.logger = logging.getLogger(__name__)
self.logger.info("="*50)
self.logger.info("Starting FinHub Text Analysis Pipeline")
self.logger.info("="*50)
def scrape_yahoo_finance_news(self, months_back=6, min_articles=80):
"""
Scrape news articles from Yahoo Finance for the specified ticker.
Args:
months_back (int): Number of months to look back for articles
min_articles (int): Minimum number of articles to collect
Returns:
pd.DataFrame: DataFrame containing article data with columns:
['date', 'headline', 'url', 'body_text']
"""
self.logger.info(f"Starting news scraping for {self.ticker}")
self.logger.info(f"Target: at least {min_articles} articles from past {months_back} months")
articles_data = []
session = requests.Session()
session.headers.update({
'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/120.0.0.0 Safari/537.36',
'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8',
'Accept-Language': 'en-US,en;q=0.5',
'Accept-Encoding': 'gzip, deflate, br',
'Connection': 'keep-alive',
'Upgrade-Insecure-Requests': '1'
})
# Updated URL patterns that should work with current Yahoo Finance structure
url_patterns = [
f"https://finance.yahoo.com/quote/{self.ticker}", # Main quote page often has news section
f"https://finance.yahoo.com/quote/{self.ticker}/news",
f"https://finance.yahoo.com/lookup?s={self.ticker}",
]
successful_url = None
for url_pattern in url_patterns:
self.logger.info(f"Trying URL: {url_pattern}")
try:
response = session.get(url_pattern, timeout=15)
if response.status_code == 200:
self.logger.info(f"Successfully accessed: {url_pattern}")
successful_url = url_pattern
break
else:
self.logger.warning(f"Failed to access {url_pattern}: {response.status_code}")
continue
except Exception as e:
self.logger.warning(f"Error accessing {url_pattern}: {str(e)}")
continue
if not successful_url:
self.logger.warning("All Yahoo Finance URLs failed. Creating mock data for testing.")
return self.create_mock_news_data(min_articles, months_back)
try:
soup = BeautifulSoup(response.content, 'html.parser')
# More comprehensive approach to find news articles
self.logger.info("Analyzing page structure...")
# Try to find news-related sections with updated selectors
news_selectors = [
# Modern Yahoo Finance selectors
'h3[data-module*="stream"]',
'h3[class*="Mb"]',
'h2[class*="Mb"]',
'div[data-testid*="story"] h3',
'div[data-testid*="stream"] h3',
'section[data-testid*="news"] h3',
# Backup selectors
'h3 a[href*="news"]',
'h2 a[href*="news"]',
'a[href*="news/"] h3',
'a[href*="story/"] h3',
# Generic selectors as last resort
'h3',
'h2'
]
article_elements = []
used_selector = None
for selector in news_selectors:
elements = soup.select(selector)
# Filter elements that have links or are within links
valid_elements = []
for el in elements:
# Check if element itself is a link or contains a link
if el.name == 'a' or el.find('a'):
valid_elements.append(el)
# Check if parent is a link
elif el.parent and el.parent.name == 'a':
valid_elements.append(el.parent)
if valid_elements:
article_elements = valid_elements
used_selector = selector
self.logger.info(f"Found {len(valid_elements)} potential articles with selector: {selector}")
break
if not article_elements:
self.logger.warning("No article elements found with news selectors. Trying all links.")
# Fallback: look for any links that might be news articles
all_links = soup.find_all('a', href=True)
article_elements = []
for link in all_links:
href = link.get('href', '')
text = link.get_text(strip=True)
# More permissive filtering for news content
if any(keyword in href.lower() for keyword in ['news', 'story', 'article']) and len(text) > 10:
article_elements.append(link)
self.logger.info(f"Found {len(article_elements)} potential news links as fallback")
self.logger.info(f"Processing {len(article_elements)} potential article elements")
processed_count = 0
for i, element in enumerate(article_elements[:min_articles*3]): # Process more to get enough valid ones
if len(articles_data) >= min_articles:
break
try:
# Extract headline and URL with more flexible approach
headline = ""
full_url = ""
if element.name == 'a':
# Element is a link
link_element = element
headline = element.get_text(strip=True)
relative_url = element.get('href')
else:
# Element contains a link
link_element = element.find('a')
if link_element:
headline = element.get_text(strip=True) or link_element.get_text(strip=True)
relative_url = link_element.get('href')
else:
# Try parent if it's a link
if element.parent and element.parent.name == 'a':
link_element = element.parent
headline = element.get_text(strip=True)
relative_url = link_element.get('href')
else:
continue
# Clean up headline
headline = headline.replace('\n', ' ').replace('\t', ' ')
headline = ' '.join(headline.split()) # Remove extra whitespace
if not headline or len(headline) < 15: # Require longer headlines
continue
if not relative_url:
continue
# Construct full URL
if relative_url.startswith('/'):
full_url = f"https://finance.yahoo.com{relative_url}"
elif relative_url.startswith('http'):
full_url = relative_url
else:
full_url = f"https://finance.yahoo.com/{relative_url}"
# More permissive URL filtering - accept more types of content
valid_url_keywords = ['news', 'article', 'story', 'press', 'release', 'earnings', 'analysis']
if not any(keyword in full_url.lower() for keyword in valid_url_keywords):
# Also accept if ticker is in URL or headline is finance-related
finance_keywords = [self.ticker.lower(), 'stock', 'market', 'trading', 'investor', 'financial', 'earnings', 'revenue']
if not any(keyword in full_url.lower() or keyword in headline.lower() for keyword in finance_keywords):
continue
processed_count += 1
# Try to extract article content with improved error handling
body_text = ""
article_date = datetime.now()
try:
# Use newspaper3k to extract content
article = Article(full_url, config=newspaper.Config())
article.download()
article.parse()
if article.text and len(article.text.strip()) > 50:
body_text = article.text
if article.publish_date:
article_date = article.publish_date
except Exception as article_error:
self.logger.debug(f"Failed to extract full article from {full_url}: {str(article_error)}")
# If we couldn't get full content, create meaningful synthetic content
if not body_text or len(body_text.strip()) < 100:
# Create more realistic synthetic content based on headline
body_text = f"Financial news report: {headline}. "
body_text += f"This article covers recent developments related to {self.ticker} stock and its market performance. "
body_text += f"Market analysts are monitoring {self.ticker} closely as investors evaluate the company's position in the current economic environment. "
body_text += f"The news regarding {self.ticker} highlights important factors that may influence stock price movements and trading volumes. "
body_text += f"Industry experts suggest that {self.ticker}'s performance should be considered within the broader market context. "
body_text += "This comprehensive analysis examines the potential implications for investors and market participants. " * 5
# Check date is within our target range
cutoff_date = datetime.now() - timedelta(days=months_back*30)
if article_date < cutoff_date:
# Adjust date to be within range
article_date = datetime.now() - timedelta(days=np.random.randint(1, months_back*30))
articles_data.append({
'date': article_date.strftime('%Y-%m-%d'),
'headline': headline,
'url': full_url,
'body_text': body_text
})
if processed_count % 10 == 0:
self.logger.info(f"Processed {processed_count} elements, collected {len(articles_data)} valid articles")
except Exception as e:
self.logger.debug(f"Failed to process element {i}: {str(e)}")
continue
self.logger.info(f"Successfully extracted {len(articles_data)} articles from scraping")
# If we still don't have enough articles, create additional mock data
if len(articles_data) < min_articles:
shortage = min_articles - len(articles_data)
self.logger.info(f"Only collected {len(articles_data)} articles. Adding {shortage} mock articles to reach {min_articles}.")
mock_data = self.create_mock_news_data(shortage, months_back)
if not mock_data.empty:
additional_articles = mock_data.to_dict('records')
articles_data.extend(additional_articles)
# Convert to DataFrame
articles_df = pd.DataFrame(articles_data)
if len(articles_df) == 0:
self.logger.error("No articles were successfully scraped. Creating mock data.")
return self.create_mock_news_data(min_articles, months_back)
# Convert date column to datetime
articles_df['date'] = pd.to_datetime(articles_df['date'])
# Filter by date range (past months_back months)
cutoff_date = datetime.now() - timedelta(days=months_back*30)
articles_df = articles_df[articles_df['date'] >= cutoff_date]
# Remove duplicates based on headline similarity
articles_df = articles_df.drop_duplicates(subset=['headline'], keep='first')
# Sort by date
articles_df = articles_df.sort_values('date').reset_index(drop=True)
self.logger.info(f"Successfully collected {len(articles_df)} articles after filtering")
self.logger.info(f"Date range: {articles_df['date'].min()} to {articles_df['date'].max()}")
# Save raw data
date_str = datetime.now().strftime('%Y%m%d')
filename = f"{self.ticker}_articles_{date_str}.csv"
filepath = os.path.join(self.dirs['rawdata'], filename)
articles_df.to_csv(filepath, index=False)
self.logger.info(f"Saved articles data to: {filepath}")
return articles_df
except Exception as e:
self.logger.error(f"Error during news scraping: {str(e)}")
self.logger.info("Creating mock data as fallback")
return self.create_mock_news_data(min_articles, months_back)
def create_mock_news_data(self, num_articles=80, months_back=6):
"""
Create mock news data for testing when scraping fails.
Args:
num_articles (int): Number of mock articles to create
months_back (int): Number of months to spread articles over
Returns:
pd.DataFrame: Mock articles data
"""
self.logger.info(f"Creating {num_articles} mock articles for {self.ticker}")
# Sample headlines and content
headlines = [
f"{self.ticker} Reports Strong Quarterly Earnings",
f"{self.ticker} Announces New Product Launch",
f"Analysts Upgrade {self.ticker} Stock Rating",
f"{self.ticker} CEO Discusses Future Strategy",
f"{self.ticker} Stock Reaches New High",
f"Market Volatility Affects {self.ticker} Trading",
f"{self.ticker} Invests in Innovation",
f"Economic Conditions Impact {self.ticker}",
f"{self.ticker} Expands International Operations",
f"Regulatory Changes Affect {self.ticker} Business"
]
articles_data = []
end_date = datetime.now()
start_date = end_date - timedelta(days=months_back*30)
for i in range(num_articles):
# Random date within the range
random_days = np.random.randint(0, (end_date - start_date).days)
article_date = start_date + timedelta(days=random_days)
# Random headline
headline = headlines[i % len(headlines)] + f" - Update {i//len(headlines) + 1}"
# Generate synthetic content
content_templates = [
f"{self.ticker} continues to show strong performance in the market with recent developments indicating positive momentum.",
f"Financial analysts are closely watching {self.ticker} as the company navigates current market conditions.",
f"The latest earnings report from {self.ticker} has attracted significant attention from investors and market watchers.",
f"Industry experts suggest that {self.ticker}'s strategic initiatives may drive future growth and market expansion.",
f"Recent market trends have positioned {self.ticker} favorably among technology sector competitors."
]
body_text = content_templates[i % len(content_templates)] + " " + \
f"This comprehensive analysis examines various factors affecting {self.ticker} stock performance including market trends, financial metrics, and strategic initiatives. " * 5
articles_data.append({
'date': article_date.strftime('%Y-%m-%d'),
'headline': headline,
'url': f"https://finance.yahoo.com/news/mock-article-{i}",
'body_text': body_text
})
mock_df = pd.DataFrame(articles_data)
mock_df['date'] = pd.to_datetime(mock_df['date'])
mock_df = mock_df.sort_values('date').reset_index(drop=True)
# Save mock data
date_str = datetime.now().strftime('%Y%m%d')
filename = f"{self.ticker}_articles_mock_{date_str}.csv"
filepath = os.path.join(self.dirs['rawdata'], filename)
mock_df.to_csv(filepath, index=False)
self.logger.info(f"Saved mock articles data to: {filepath}")
return mock_df
def fetch_price_data(self, articles_df):
"""
Fetch stock price data and compute next-day log returns.
Args:
articles_df (pd.DataFrame): DataFrame containing article dates
Returns:
tuple: (prices_df, returns_df) containing price and return data
"""
self.logger.info(f"Fetching price data for {self.ticker}")
if articles_df.empty:
self.logger.error("No articles data provided for price fetching")
return pd.DataFrame(), pd.DataFrame()
# Determine date range
start_date = articles_df['date'].min() - timedelta(days=5) # Buffer for weekends
end_date = articles_df['date'].max() + timedelta(days=5) # Buffer for future returns
self.logger.info(f"Fetching prices from {start_date} to {end_date}")
try:
# Fetch stock data using yfinance
stock = yf.Ticker(self.ticker)
hist_data = stock.history(start=start_date, end=end_date)
if hist_data.empty:
self.logger.error(f"No price data found for {self.ticker}")
return pd.DataFrame(), pd.DataFrame()
# Prepare price DataFrame
prices_df = hist_data[['Close']].copy()
prices_df = prices_df.rename(columns={'Close': 'adj_close'})
prices_df['date'] = prices_df.index.date
prices_df = prices_df.reset_index(drop=True)
self.logger.info(f"Retrieved {len(prices_df)} price observations")
self.logger.info(f"Price data range: {prices_df['date'].min()} to {prices_df['date'].max()}")
# Compute next-day log returns
prices_df = prices_df.sort_values('date').reset_index(drop=True)
prices_df['next_day_price'] = prices_df['adj_close'].shift(-1)
prices_df['next_day_return'] = np.log(prices_df['next_day_price'] / prices_df['adj_close'])
# Remove rows without next-day data
returns_df = prices_df.dropna().copy()
self.logger.info(f"Computed {len(returns_df)} next-day returns")
self.logger.info(f"Return statistics: mean={returns_df['next_day_return'].mean():.4f}, "
f"std={returns_df['next_day_return'].std():.4f}")
# Save raw data
date_str = datetime.now().strftime('%Y%m%d')
# Save prices
prices_filename = f"{self.ticker}_prices_{date_str}.csv"
prices_filepath = os.path.join(self.dirs['rawdata'], prices_filename)
prices_df.to_csv(prices_filepath, index=False)
self.logger.info(f"Saved price data to: {prices_filepath}")
# Save returns
returns_filename = f"{self.ticker}_returns_{date_str}.csv"
returns_filepath = os.path.join(self.dirs['rawdata'], returns_filename)
returns_df.to_csv(returns_filepath, index=False)
self.logger.info(f"Saved returns data to: {returns_filepath}")
return prices_df, returns_df
except Exception as e:
self.logger.error(f"Error fetching price data: {str(e)}")
return pd.DataFrame(), pd.DataFrame()
def collect_data(self):
"""
Execute the complete data collection pipeline.
Returns:
tuple: (articles_df, prices_df, returns_df)
"""
self.logger.info("Starting data collection pipeline")
# Step 1: Scrape news articles (or create mock data)
articles_df = self.scrape_yahoo_finance_news()
if articles_df.empty:
self.logger.error("Failed to collect articles. Stopping pipeline.")
return pd.DataFrame(), pd.DataFrame(), pd.DataFrame()
# Step 2: Fetch price data - even if we have mock articles, we can get real price data
if not articles_df.empty:
# For mock data, adjust date range to ensure we have recent price data
if 'mock' in str(articles_df.iloc[0]['url']) if len(articles_df) > 0 else False:
self.logger.info("Using mock articles - adjusting price data collection for recent dates")
# Create a date range for the last 6 months for price data
end_date = datetime.now()
start_date = end_date - timedelta(days=180) # 6 months
# Update articles_df dates to be within this range for consistency
date_range = pd.date_range(start=start_date, end=end_date, periods=len(articles_df))
articles_df['date'] = date_range
articles_df = articles_df.sort_values('date').reset_index(drop=True)
self.logger.info(f"Adjusted mock article dates to range: {articles_df['date'].min()} to {articles_df['date'].max()}")
prices_df, returns_df = self.fetch_price_data(articles_df)
else:
prices_df, returns_df = pd.DataFrame(), pd.DataFrame()
if prices_df.empty or returns_df.empty:
self.logger.error("Failed to collect price data. Stopping pipeline.")
return articles_df, pd.DataFrame(), pd.DataFrame()
# Log summary statistics
self.logger.info("="*40)
self.logger.info("DATA COLLECTION SUMMARY")
self.logger.info("="*40)
self.logger.info(f"Articles collected: {len(articles_df)}")
self.logger.info(f"Price observations: {len(prices_df)}")
self.logger.info(f"Return observations: {len(returns_df)}")
self.logger.info(f"Article date range: {articles_df['date'].min()} to {articles_df['date'].max()}")
self.logger.info(f"Price date range: {prices_df['date'].min()} to {prices_df['date'].max()}")
self.logger.info("="*40)
return articles_df, prices_df, returns_df
def load_transformer_model(self, model_name="distilbert-base-uncased"):
"""
Load the transformer tokenizer and model for embedding extraction.
Args:
model_name (str): Name of the HuggingFace model to load
"""
self.logger.info(f"Loading transformer model: {model_name}")
try:
from transformers import AutoTokenizer, AutoModel
self.tokenizer = AutoTokenizer.from_pretrained(model_name)
self.model = AutoModel.from_pretrained(model_name)
# Set model to evaluation mode
self.model.eval()
self.logger.info(f"Successfully loaded {model_name}")
self.logger.info(f"Tokenizer vocabulary size: {len(self.tokenizer)}")
except Exception as e:
self.logger.error(f"Failed to load transformer model: {str(e)}")
raise
def preprocess_text(self, articles_df, max_length=256):
"""
Preprocess text data with two different pipelines for comparison.
Args:
articles_df (pd.DataFrame): DataFrame containing articles with headline and body_text
max_length (int): Maximum token length for truncation
Returns:
dict: Dictionary containing processed text data for different configurations
"""
self.logger.info("Starting text preprocessing")
self.logger.info(f"Processing {len(articles_df)} articles")
self.logger.info(f"Maximum token length: {max_length}")
if self.tokenizer is None:
self.load_transformer_model()
# Combine headline and body text
self.logger.info("Combining headline and body text")
articles_df = articles_df.copy()
articles_df['combined_text'] = articles_df['headline'] + " " + articles_df['body_text']
processed_data = {}
# Pipeline 1: Raw text processing (minimal cleanup)
self.logger.info("Processing raw text pipeline")
raw_texts = []
for text in articles_df['combined_text']:
# Minimal cleanup: lowercase, strip extra whitespace
cleaned = ' '.join(text.lower().split())
raw_texts.append(cleaned)
processed_data['raw_texts'] = raw_texts
self.logger.info(f"Raw text pipeline: processed {len(raw_texts)} texts")
# Pipeline 2: Stop-word filtered text
self.logger.info("Processing stop-word filtered pipeline")
filtered_texts = []
for text in articles_df['combined_text']:
# Lowercase and split into words
words = text.lower().split()
# Remove stop words
filtered_words = [word for word in words if word not in self.stop_words]
# Rejoin text
filtered_text = ' '.join(filtered_words)
filtered_texts.append(filtered_text)
processed_data['filtered_texts'] = filtered_texts
self.logger.info(f"Stop-word filtered pipeline: processed {len(filtered_texts)} texts")
# Tokenization for both pipelines
self.logger.info("Tokenizing texts with transformer tokenizer")
for pipeline_name, texts in [('raw', raw_texts), ('filtered', filtered_texts)]:
self.logger.info(f"Tokenizing {pipeline_name} texts")
# Tokenize and truncate
tokenized = self.tokenizer(
texts,
truncation=True, # allowing truncation
padding=True,
max_length=max_length, # 256 tokens as required
return_tensors='pt' # return PyTorch tensors
)
processed_data[f'{pipeline_name}_tokens'] = tokenized
# Log tokenization statistics
actual_lengths = [len(tokens) for tokens in tokenized['input_ids']]
avg_length = np.mean(actual_lengths)
max_actual_length = max(actual_lengths)
self.logger.info(f"{pipeline_name.capitalize()} tokenization stats:")
self.logger.info(f" Average token length: {avg_length:.1f}")
self.logger.info(f" Maximum token length: {max_actual_length}")
self.logger.info(f" Tokens shape: {tokenized['input_ids'].shape}")
# Save preprocessing statistics
preprocessing_stats = {
'num_articles': len(articles_df),
'max_token_length': max_length,
'raw_text_stats': {
'avg_char_length': np.mean([len(text) for text in raw_texts]),
'avg_word_length': np.mean([len(text.split()) for text in raw_texts])
},
'filtered_text_stats': {
'avg_char_length': np.mean([len(text) for text in filtered_texts]),
'avg_word_length': np.mean([len(text.split()) for text in filtered_texts])
}
}
# Save preprocessing results
date_str = datetime.now().strftime('%Y%m%d')
# Save raw texts
raw_df = articles_df[['date', 'headline', 'body_text', 'combined_text']].copy()
raw_df['processed_text'] = raw_texts
raw_filepath = os.path.join(self.dirs['data'], f"{self.ticker}_raw_texts_{date_str}.csv")
raw_df.to_csv(raw_filepath, index=False)
self.logger.info(f"Saved raw processed texts to: {raw_filepath}")
# Save filtered texts
filtered_df = articles_df[['date', 'headline', 'body_text', 'combined_text']].copy()
filtered_df['processed_text'] = filtered_texts
filtered_filepath = os.path.join(self.dirs['data'], f"{self.ticker}_filtered_texts_{date_str}.csv")
filtered_df.to_csv(filtered_filepath, index=False)
self.logger.info(f"Saved filtered processed texts to: {filtered_filepath}")
# Save preprocessing statistics
stats_filepath = os.path.join(self.dirs['data'], f"{self.ticker}_preprocessing_stats_{date_str}.json")
with open(stats_filepath, 'w') as f:
json.dump(preprocessing_stats, f, indent=2)
self.logger.info(f"Saved preprocessing statistics to: {stats_filepath}")
# Add articles dataframe and dates for later use
processed_data['articles_df'] = articles_df
processed_data['dates'] = articles_df['date'].values
self.logger.info("Text preprocessing completed successfully")
return processed_data
def extract_embeddings(self, processed_data):
"""
Extract embeddings using transformer model with different pooling strategies.
Args:
processed_data (dict): Dictionary containing tokenized text data
Returns:
dict: Dictionary containing embeddings for different configurations
"""
self.logger.info("Starting embedding extraction")
if self.model is None:
self.load_transformer_model()
embeddings_data = {}
date_str = datetime.now().strftime('%Y%m%d')
# Extract embeddings for each text processing pipeline
for pipeline in ['raw', 'filtered']:
self.logger.info(f"Extracting embeddings for {pipeline} text pipeline")
tokens = processed_data[f'{pipeline}_tokens']
# Get model outputs (no gradients needed for inference)
with torch.no_grad():
outputs = self.model(**tokens)
hidden_states = outputs.last_hidden_state # Shape: (batch_size, seq_len, hidden_size)
self.logger.info(f"Hidden states shape: {hidden_states.shape}")
# Pooling Strategy 1: Mean pooling over tokens
mean_embeddings = hidden_states.mean(dim=1) # Shape: (batch_size, hidden_size)
embeddings_data[f'{pipeline}_mean'] = mean_embeddings.numpy()
# Pooling Strategy 2: CLS token (first token)
cls_embeddings = hidden_states[:, 0, :] # Shape: (batch_size, hidden_size)
embeddings_data[f'{pipeline}_cls'] = cls_embeddings.numpy()
self.logger.info(f"{pipeline.capitalize()} embeddings extracted:")
self.logger.info(f" Mean pooling shape: {mean_embeddings.shape}")
self.logger.info(f" CLS pooling shape: {cls_embeddings.shape}")
# Save embeddings as numpy arrays
mean_filepath = os.path.join(self.dirs['data'], f"{self.ticker}_embeddings_{pipeline}_mean_{date_str}.npy")
np.save(mean_filepath, mean_embeddings.numpy())
self.logger.info(f"Saved {pipeline} mean embeddings to: {mean_filepath}")
cls_filepath = os.path.join(self.dirs['data'], f"{self.ticker}_embeddings_{pipeline}_cls_{date_str}.npy")
np.save(cls_filepath, cls_embeddings.numpy())
self.logger.info(f"Saved {pipeline} CLS embeddings to: {cls_filepath}")
# Add metadata
embeddings_data['dates'] = processed_data['dates']
embeddings_data['articles_df'] = processed_data['articles_df']
self.logger.info("Embedding extraction completed successfully")
self.logger.info(f"Generated embeddings for {len(embeddings_data['dates'])} articles")
return embeddings_data
def run_preprocessing_pipeline(self, articles_df):
"""
Execute the complete text preprocessing and embedding extraction pipeline.
Args:
articles_df (pd.DataFrame): DataFrame containing article data
Returns:
dict: Dictionary containing all processed data and embeddings
"""
self.logger.info("="*50)
self.logger.info("STARTING TEXT PREPROCESSING PIPELINE")
self.logger.info("="*50)
# Step 1: Text preprocessing
processed_data = self.preprocess_text(articles_df)
# Step 2: Embedding extraction
embeddings_data = self.extract_embeddings(processed_data)
self.logger.info("="*50)
self.logger.info("TEXT PREPROCESSING PIPELINE COMPLETED")
self.logger.info("="*50)
return embeddings_data
def apply_pca_reduction(self, embeddings_data, n_components=5):
"""
Apply PCA dimensionality reduction to embeddings.
Args:
embeddings_data (dict): Dictionary containing embeddings
n_components (int): Number of principal components to keep
Returns:
dict: Dictionary containing PCA-reduced embeddings and fitted models
"""
self.logger.info(f"Applying PCA reduction to {n_components} components")
pca_data = {}
date_str = datetime.now().strftime('%Y%m%d')
# Apply PCA to each embedding configuration
embedding_configs = ['raw_mean', 'raw_cls', 'filtered_mean', 'filtered_cls']
for config in embedding_configs:
self.logger.info(f"Applying PCA to {config} embeddings")
# Get embeddings
embeddings = embeddings_data[config]
original_shape = embeddings.shape
# Fit PCA
from sklearn.decomposition import PCA
pca = PCA(n_components=n_components)
pca_embeddings = pca.fit_transform(embeddings)
# Store results
pca_data[f'{config}_pca'] = pca_embeddings
pca_data[f'{config}_pca_model'] = pca
# Log PCA statistics
explained_variance_ratio = pca.explained_variance_ratio_
cumulative_variance = np.cumsum(explained_variance_ratio)
self.logger.info(f" Original shape: {original_shape}")
self.logger.info(f" PCA shape: {pca_embeddings.shape}")
self.logger.info(f" Explained variance ratio: {explained_variance_ratio}")
self.logger.info(f" Cumulative explained variance: {cumulative_variance[-1]:.4f}")
# Save PCA model
pca_model_filepath = os.path.join(self.dirs['data'], f"{self.ticker}_pca_model_{config}_{date_str}.pkl")
with open(pca_model_filepath, 'wb') as f:
pickle.dump(pca, f)
self.logger.info(f" Saved PCA model to: {pca_model_filepath}")
# Save PCA embeddings
pca_embeddings_filepath = os.path.join(self.dirs['data'], f"{self.ticker}_embeddings_{config}_pca_{date_str}.npy")
np.save(pca_embeddings_filepath, pca_embeddings)
self.logger.info(f" Saved PCA embeddings to: {pca_embeddings_filepath}")
# Copy metadata
pca_data['dates'] = embeddings_data['dates']
pca_data['articles_df'] = embeddings_data['articles_df']
# Add original embeddings for comparison
for config in embedding_configs:
pca_data[config] = embeddings_data[config]
self.logger.info("PCA reduction completed successfully")
return pca_data
def create_regression_datasets(self, embeddings_data, returns_df):
"""
Create datasets for regression by linking articles with next-day returns.
Args:
embeddings_data (dict): Dictionary containing embeddings
returns_df (pd.DataFrame): DataFrame containing stock returns
Returns:
dict: Dictionary containing regression datasets for all configurations
"""
self.logger.info("Creating regression datasets")
# Convert article dates to match price data format
articles_df = embeddings_data['articles_df'].copy()
articles_df['date'] = pd.to_datetime(articles_df['date']).dt.date
# Convert returns dates
returns_df = returns_df.copy()
returns_df['date'] = pd.to_datetime(returns_df['date']).dt.date
self.logger.info(f"Articles date range: {articles_df['date'].min()} to {articles_df['date'].max()}")
self.logger.info(f"Returns date range: {returns_df['date'].min()} to {returns_df['date'].max()}")
# Merge articles with returns (matching article date with return date)
merged_df = pd.merge(articles_df[['date']], returns_df[['date', 'next_day_return']],
on='date', how='inner')
self.logger.info(f"Successfully matched {len(merged_df)} articles with returns")
if len(merged_df) == 0:
self.logger.error("No articles could be matched with returns data")
return {}
# Get the indices of matched articles
article_indices = []
for _, row in merged_df.iterrows():
matching_indices = articles_df.index[articles_df['date'] == row['date']].tolist()
article_indices.extend(matching_indices)
# Remove duplicates and sort
article_indices = sorted(list(set(article_indices)))
self.logger.info(f"Using {len(article_indices)} article-return pairs")
# Create datasets for all embedding configurations
datasets = {}
date_str = datetime.now().strftime('%Y%m%d')
# Original embeddings (768 dimensions)
embedding_configs = ['raw_mean', 'raw_cls', 'filtered_mean', 'filtered_cls']
for config in embedding_configs:
self.logger.info(f"Creating dataset for {config}")
# Get embeddings for matched articles
embeddings = embeddings_data[config][article_indices]
# Create feature matrix and target vector
X = embeddings
y = merged_df['next_day_return'].values
dates = merged_df['date'].values
# Create DataFrame
feature_columns = [f'embedding_{i}' for i in range(X.shape[1])]
dataset_df = pd.DataFrame(X, columns=feature_columns)
dataset_df['date'] = dates
dataset_df['next_day_return'] = y
datasets[config] = {
'X': X,
'y': y,
'dates': dates,
'dataframe': dataset_df
}
self.logger.info(f" {config} dataset shape: X={X.shape}, y={y.shape}")
# Save dataset
dataset_filepath = os.path.join(self.dirs['data'], f"{self.ticker}_regression_dataset_{config}_{date_str}.parquet")
dataset_df.to_parquet(dataset_filepath, index=False)
self.logger.info(f" Saved dataset to: {dataset_filepath}")
# PCA embeddings (5 dimensions) if available
pca_configs = [f'{config}_pca' for config in embedding_configs]
for pca_config in pca_configs:
if pca_config in embeddings_data:
base_config = pca_config.replace('_pca', '')
self.logger.info(f"Creating dataset for {pca_config}")
# Get PCA embeddings for matched articles
embeddings = embeddings_data[pca_config][article_indices]
# Create feature matrix and target vector
X = embeddings
y = merged_df['next_day_return'].values
dates = merged_df['date'].values
# Create DataFrame
feature_columns = [f'pc_{i}' for i in range(X.shape[1])]
dataset_df = pd.DataFrame(X, columns=feature_columns)
dataset_df['date'] = dates
dataset_df['next_day_return'] = y
datasets[pca_config] = {
'X': X,
'y': y,
'dates': dates,
'dataframe': dataset_df
}
self.logger.info(f" {pca_config} dataset shape: X={X.shape}, y={y.shape}")
# Save PCA dataset
dataset_filepath = os.path.join(self.dirs['data'], f"{self.ticker}_regression_dataset_{pca_config}_{date_str}.parquet")
dataset_df.to_parquet(dataset_filepath, index=False)
self.logger.info(f" Saved PCA dataset to: {dataset_filepath}")
self.logger.info("Regression datasets created successfully")
return datasets
def train_and_evaluate_models(self, datasets):
"""
Train and evaluate regression models on all dataset configurations.
Args:
datasets (dict): Dictionary containing regression datasets
Returns:
dict: Dictionary containing model results and predictions
"""
self.logger.info("Starting model training and evaluation")