diff --git a/README.md b/README.md index 53b3e4d..7f09ab2 100644 --- a/README.md +++ b/README.md @@ -38,18 +38,56 @@ The app uses `litellm`, allowing you to use any major LLM provider: ## Usage -Run the analysis for any stock ticker: +### CLI Interface + +The project now includes a powerful CLI interface built with `rich` and `typer`. ```bash -poetry run python main.py AAPL +# Analyze a stock +poetry run python cli.py analyze -t AAPL -m ollama/mistral:7b + +# Manage your portfolio +poetry run python cli.py portfolio + +# See daily runners with momentum scoring +poetry run python cli.py runners --mode live + +# Run a live streaming scanner +poetry run python cli.py runners --stream + +# Backtest runners for a specific date +poetry run python cli.py runners --mode backtest --date 2024-02-15 + +# Manage catalysts and news +poetry run python cli.py news add --symbol TSLA --title "Earnings Beat" --source "Reuters" --type earnings --sentiment positive +poetry run python cli.py news list --symbol TSLA +poetry run python cli.py news fetch TSLA -m ollama/mistral:7b -catastr 1 ``` -You can also specify a different model: +### Catalyst-Aware Momentum Scanning +The `runners` command now integrates with the news module. If recent news (catalysts) exist for a ticker, the momentum score is adjusted based on: +- **Sentiment**: Positive catalysts increase the score; negative ones decrease it. +- **News Type**: Earnings, FDA approvals, and Guidance updates carry higher weight. +- **Strength**: Manually rated catalyst strength (1-5) scales the impact. + +### Analysis with Custom Model + +You can still run a single analysis via the CLI: ```bash -poetry run python main.py TSLA --model anthropic/claude-3-5-sonnet-20240620 +poetry run python cli.py analyze -t AAPL ``` +### Configuration +- Portfolio is stored in `portfolio.json`. +- Analysis exports are saved to `analysis_export.json` and `analysis_export.csv`. 
+ +## Project Structure +- `trading_analysis/domain`: Core interfaces and data models. +- `trading_analysis/infrastructure`: Data providers (yfinance, ddgs) and quantitative engines. +- `trading_analysis/application`: High-level business logic and analyzer orchestration. +- `cli.py`: Main entry point for the interactive CLI. + ## Token Usage & Costs Analysis A single analysis run involves 7 LLM calls to process different aspects of the stock (Health, Value, Sentiment, 3 Judges, and Final Resolver). diff --git a/cli.py b/cli.py new file mode 100644 index 0000000..a00f45c --- /dev/null +++ b/cli.py @@ -0,0 +1,556 @@ +import typer +from rich.console import Console +from rich.table import Table +from rich.panel import Panel +from rich.live import Live +from rich.progress import Progress, SpinnerColumn, TextColumn +from rich import print as rprint +from rich.columns import Columns +import os +from typing import Optional +from datetime import datetime + +from trading_analysis.infrastructure.portfolio_manager import PortfolioManager +from trading_analysis.infrastructure.runners_data import YFinanceRunnersProvider +from trading_analysis.infrastructure.engines.runners_scoring import MomentumScoringEngine +from trading_analysis.application.runners_service import DefaultRunnersService +from trading_analysis.infrastructure.persistence import SQLAlchemyRunnersPersistence, SQLAlchemyNewsRepository +from trading_analysis.application.news_service import DefaultNewsService +from trading_analysis.infrastructure.engines.backtest_engine import MomentumBacktestEngine +from trading_analysis.application.analyzer import StockAnalyzer +from trading_analysis.infrastructure.aggregated_data import AggregatedDataProvider +from trading_analysis.infrastructure.llm_service import LiteLLMService +from trading_analysis.infrastructure.engines.risk_engine import YFinanceRiskEngine +from trading_analysis.infrastructure.engines.market_regime import YFinanceMarketRegimeEngine +from 
trading_analysis.infrastructure.engines.portfolio_engine import YFinancePortfolioImpactEngine +from trading_analysis.infrastructure.engines.relative_strength import YFinanceRelativeStrengthEngine +from trading_analysis.infrastructure.engines.monte_carlo import GBM_MonteCarloEngine +from trading_analysis.application.exporter import DataExporter + +app = typer.Typer(help="Trading Analyst CLI") +news_app = typer.Typer(help="Manage stock news and catalysts") +app.add_typer(news_app, name="news") + +console = Console() +portfolio_manager = PortfolioManager() + +def get_news_service(model: Optional[str] = None): + repo = SQLAlchemyNewsRepository() + llm = LiteLLMService(model) if model else None + return DefaultNewsService(repo, llm) + +@news_app.command("add") +def news_add( + symbol: str = typer.Option(..., "--symbol", "-s", help="Stock ticker"), + title: str = typer.Option(..., "--title", "-t", help="News title"), + source: str = typer.Option(..., "--source", help="Publisher/Source"), + news_type: str = typer.Option("other", "--type", help="earnings, guidance, FDA, merger, acquisition, offering, dilution, contract, analyst_upgrade, analyst_downgrade, macro, other"), + sentiment: str = typer.Option("neutral", "--sentiment", help="positive, neutral, negative"), + strength: int = typer.Option(1, "--strength", help="Catalyst strength (1-5)"), + content: Optional[str] = typer.Option(None, "--content", help="Full text content"), + link: Optional[str] = typer.Option(None, "--link", help="URL to news") +): + """Add a new catalyst/news entry.""" + service = get_news_service() + news = service.add_news( + symbol=symbol, title=title, publisher=source, + news_type=news_type, sentiment=sentiment, + catalyst_strength=strength, content=content, link=link + ) + rprint(f"[green]Added news with ID: {news.id} for {news.symbol}[/green]") + +@news_app.command("update") +def news_update( + news_id: int = typer.Option(..., "--id", help="News entry ID"), + title: Optional[str] = 
typer.Option(None, "--title", "-t"), + sentiment: Optional[str] = typer.Option(None, "--sentiment"), + strength: Optional[int] = typer.Option(None, "--strength"), + content: Optional[str] = typer.Option(None, "--content") +): + """Update an existing news entry.""" + service = get_news_service() + try: + news = service.update_news( + news_id=news_id, title=title, content=content, + sentiment=sentiment, catalyst_strength=strength + ) + rprint(f"[green]Updated news ID {news.id}[/green]") + except ValueError as e: + rprint(f"[red]{e}[/red]") + +@news_app.command("delete") +def news_delete( + news_id: int = typer.Option(..., "--id", help="News entry ID"), + force: bool = typer.Option(False, "--force", help="Physically delete instead of soft-delete") +): + """Delete a news entry.""" + service = get_news_service() + service.delete_news(news_id, force=force) + rprint(f"[yellow]Deleted news ID {news_id}[/yellow]") + +@news_app.command("list") +def news_list( + symbol: Optional[str] = typer.Option(None, "--symbol", "-s", help="Filter by ticker") +): + """List recent news entries.""" + service = get_news_service() + news_items = service.list_news(symbol=symbol) + + if not news_items: + rprint("[yellow]No news entries found.[/yellow]") + return + + table = Table(title=f"Recent News{' for ' + symbol.upper() if symbol else ''}", show_header=True, header_style="bold magenta") + table.add_column("ID", style="dim") + table.add_column("Symbol", style="cyan") + table.add_column("Type") + table.add_column("Sentiment") + table.add_column("Str", justify="right") + table.add_column("Title") + table.add_column("Source") + + for n in news_items: + sent_color = "green" if n.sentiment == "positive" else "red" if n.sentiment == "negative" else "white" + table.add_row( + str(n.id), n.symbol, n.news_type, f"[{sent_color}]{n.sentiment}[/{sent_color}]", + str(n.catalyst_strength), n.title, n.publisher + ) + console.print(table) + +@news_app.command("search") +def news_search( + keyword: str = 
typer.Argument(..., help="Keyword to search in title or content") +): + """Search news entries by keyword.""" + service = get_news_service() + news_items = service.search_news(keyword) + + if not news_items: + rprint(f"[yellow]No news entries found for '{keyword}'.[/yellow]") + return + + table = Table(title=f"Search Results: {keyword}", show_header=True, header_style="bold magenta") + table.add_column("ID", style="dim") + table.add_column("Symbol", style="cyan") + table.add_column("Title") + + for n in news_items: + table.add_row(str(n.id), n.symbol, n.title) + console.print(table) + +@news_app.command("link") +def news_link( + news_id: int = typer.Option(..., "--news-id", help="News ID"), + runner_id: str = typer.Option(..., "--runner-id", help="Runner ID (e.g. 20250218-TSLA)") +): + """Link a news entry to a detected runner.""" + service = get_news_service() + link = service.link_news_to_runner(runner_id, news_id) + rprint(f"[green]Linked news {news_id} to runner {runner_id}[/green]") + +@news_app.command("fetch") +def news_fetch( + symbol: str = typer.Argument(..., help="Stock ticker to fetch news for"), + model: str = typer.Option("gpt-4o", "--model", "-m", help="LLM model for categorization"), + catalyst_strength: float = typer.Option(1, "--catalyst_strength", "-catastr", help="Catalyst strength threshold for news categorization") +): + """Fetch and automatically categorize news from Google News RSS.""" + service = get_news_service(model) + with Progress( + SpinnerColumn(), + TextColumn("[progress.description]{task.description}"), + transient=True, + ) as progress: + progress.add_task(description=f"Fetching and categorizing news for {symbol.upper()}...", total=None) + new_items = service.auto_ingest_news(symbol, catalyst_strength) + + if not new_items: + rprint(f"[yellow]No new news found for {symbol.upper()}.[/yellow]") + else: + rprint(f"[green]Successfully ingested {len(new_items)} new news items for {symbol.upper()}.[/green]") + table = Table(title=f"New 
Catalysts for {symbol.upper()}", show_header=True) + table.add_column("Type", style="cyan") + table.add_column("Sentiment") + table.add_column("Str", justify="right") + table.add_column("Title") + + for n in new_items: + sent_color = "green" if n.sentiment == "positive" else "red" if n.sentiment == "negative" else "white" + table.add_row(n.news_type, f"[{sent_color}]{n.sentiment}[/{sent_color}]", str(n.catalyst_strength), n.title) + console.print(table) + +@news_app.command("unlink") +def news_unlink( + news_id: int = typer.Option(..., "--news-id", help="News ID"), + runner_id: str = typer.Option(..., "--runner-id", help="Runner ID") +): + """Unlink a news entry from a runner.""" + service = get_news_service() + service.unlink_news_from_runner(runner_id, news_id) + rprint(f"[yellow]Unlinked news {news_id} from runner {runner_id}[/yellow]") + +def get_analyzer(model: str): + data_provider = AggregatedDataProvider() + llm_service = LiteLLMService(model) + news_service = get_news_service(model) # Added news service + risk_engine = YFinanceRiskEngine() + regime_engine = YFinanceMarketRegimeEngine() + portfolio_engine = YFinancePortfolioImpactEngine() + rs_engine = YFinanceRelativeStrengthEngine() + mc_engine = GBM_MonteCarloEngine() + + return StockAnalyzer( + data_provider=data_provider, + llm_service=llm_service, + news_service=news_service, + risk_engine=risk_engine, + regime_engine=regime_engine, + portfolio_engine=portfolio_engine, + rs_engine=rs_engine, + mc_engine=mc_engine + ) + +@app.command() +def analyze( + ticker: str = typer.Option("AAPL", "--ticker", "-t", help="Stock ticker to analyze"), + model: str = typer.Option("ollama/llama3", "--model", "-m", help="LLM model to use (e.g., gpt-4o, ollama/llama3, lm_studio/model)"), + catalyst_strength: float = typer.Option(1, "--catalyst_strength", "-catastr", help="Catalyst strength threshold for news categorization")): + """Analyze a specific stock ticker.""" + analyzer = get_analyzer(model) + full_portfolio = 
portfolio_manager.get_portfolio() + # Extract only weights for the quantitative engine + weights_only = {t: item.weight for t, item in full_portfolio.items()} + + with Progress( + SpinnerColumn(), + TextColumn("[progress.description]{task.description}"), + transient=True, + ) as progress: + progress.add_task(description=f"Analyzing {ticker}...", total=None) + analysis = analyzer.run_analysis(ticker, weights_only, catalyst_strength) + + # Display results + rprint(Panel(f"[bold blue]Analysis Results for {ticker}[/bold blue]", expand=True)) + + # Quantitative Insights Table + q_table = Table(title="[bold]Quantitative Insights[/bold]", show_header=True, header_style="bold magenta", expand=True) + q_table.add_column("Metric", style="dim") + q_table.add_column("Value") + + if analysis.market_regime: + q_table.add_row("Market Regime", f"{analysis.market_regime.regime_type} (Conf: {analysis.market_regime.regime_confidence:.2f})") + + if analysis.risk_metrics: + q_table.add_row("1Y Volatility", f"{analysis.risk_metrics.volatility_1y:.2%}") + q_table.add_row("Beta vs SPY", f"{analysis.risk_metrics.beta:.2f}") + q_table.add_row("Sharpe Ratio", f"{analysis.risk_metrics.sharpe_ratio:.2f}") + + if analysis.monte_carlo: + q_table.add_row("MC Median Return (1Y)", f"{analysis.monte_carlo.median_return:+.2%}") + q_table.add_row("Prob of +20% move", f"{analysis.monte_carlo.prob_up_20:.2%}") + + console.print(q_table) + + # Judge Opinions + rprint("\n[bold]Expert Judge Opinions[/bold]") + opinions = [] + for opinion in [analysis.trader_opinion, analysis.analyst_opinion, analysis.risk_pro_opinion]: + color = "green" if opinion.recommendation == "Buy" else "red" if opinion.recommendation == "Sell" else "yellow" + opinions.append(Panel( + f"[bold {color}]{opinion.recommendation}[/bold {color}]\n\n{opinion.opinion}", + title=f"[bold]{opinion.role}[/bold]", + border_style=color, + width=40 + )) + console.print(Columns(opinions)) + + # Final Decision Panel + fd = analysis.final_decision + 
fd_color = "green" if fd.recommendation == "Buy" else "red" if fd.recommendation == "Sell" else "yellow" + + fd_text = f"[bold {fd_color}]Recommendation: {fd.recommendation}[/bold {fd_color}]\n" + fd_text += f"Conviction Score: [bold]{fd.conviction_score}/100[/bold]\n" + fd_text += f"Risk-Adjusted Rating: [bold]{fd.risk_adjusted_rating}/5.0[/bold]\n" + fd_text += f"Suggested Position Size: [bold]{fd.position_size_suggestion*100:.2f}%[/bold]\n" + fd_text += f"Agreement Index: [bold]{fd.agreement_index:.2f}[/bold]" + + rprint(Panel(fd_text, title="[bold]FINAL DECISION[/bold]", border_style=fd_color, expand=True)) + + # Drivers and Risks in a table + dr_table = Table.grid(expand=True) + dr_table.add_column("Drivers", style="green") + dr_table.add_column("Risks", style="red") + + drivers_list = "\n".join([f"• {d}" for d in fd.primary_drivers]) + risks_list = "\n".join([f"• {r}" for r in fd.key_risks]) + + dr_table.add_row( + Panel(drivers_list, title="[bold green]Primary Drivers[/bold green]"), + Panel(risks_list, title="[bold red]Key Risks[/bold red]") + ) + console.print(dr_table) + + # Export option + export = typer.confirm("Do you want to export this analysis?") + if export: + DataExporter.to_json(analysis) + DataExporter.to_csv(analysis) + rprint("[green]Exported to analysis_export.json and analysis_export.csv[/green]") + +@app.command() +def portfolio(): + """Manage your portfolio and track performance.""" + import yfinance as yf + while True: + p = portfolio_manager.get_portfolio() + if not p: + rprint("[yellow]Portfolio is empty.[/yellow]") + else: + # Fetch current prices for performance tracking + tickers = list(p.keys()) + with Progress( + SpinnerColumn(), + TextColumn("[progress.description]{task.description}"), + transient=True, + ) as progress: + progress.add_task(description="Fetching current market data...", total=None) + try: + data = yf.download(tickers, period="1d", progress=False)['Close'] + if len(tickers) == 1: + current_prices = {tickers[0]: 
float(data.iloc[-1])} + else: + current_prices = {t: float(data[t].iloc[-1]) for t in tickers if t in data.columns} + except Exception: + current_prices = {} + + table = Table(title="Current Portfolio", show_header=True, header_style="bold green") + table.add_column("Ticker", style="cyan") + table.add_column("Name", style="white") + table.add_column("Weight", justify="right") + table.add_column("Entry Price", justify="right") + table.add_column("Current Price", justify="right") + table.add_column("P&L %", justify="right") + + for ticker, item in p.items(): + curr = current_prices.get(ticker) + entry = item.entry_price + + price_str = f"${entry:.2f}" if entry else "N/A" + curr_str = f"${curr:.2f}" if curr else "N/A" + + pnl_str = "N/A" + if curr and entry: + pnl = (curr - entry) / entry * 100 + color = "green" if pnl >= 0 else "red" + pnl_str = f"[{color}]{pnl:+.2f}%[/{color}]" + + table.add_row( + ticker, + item.name or "N/A", + f"{item.weight*100:.1f}%", + price_str, + curr_str, + pnl_str + ) + + console.print(table) + rprint("\n[dim]Use these insights for Scalping (short), Swing (medium), or Long Term decisions.[/dim]") + + choice = typer.prompt("\n(a)dd/update, (r)emove, (q)uit portfolio mgmt", default="q") + if choice == 'a': + t = typer.prompt("Ticker").upper() + w_str = typer.prompt("Weight (e.g. 0.1 for 10%)") + try: + w = float(w_str) + p_val_str = typer.prompt("Entry Price (optional, press enter to skip)", default="") + p_val = float(p_val_str) if p_val_str else None + + portfolio_manager.update_ticker(t, w, entry_price=p_val) + rprint(f"[green]Updated {t}[/green]") + except ValueError: + rprint("[red]Invalid input. 
Please enter numbers for weight and price.[/red]") + elif choice == 'r': + t = typer.prompt("Ticker to remove").upper() + portfolio_manager.remove_ticker(t) + rprint(f"[yellow]Removed {t}[/yellow]") + elif choice == 'q': + break + +@app.command() +def runners( + mode: str = typer.Option("live", "--mode", "-m", help="Mode: live, premarket, backtest"), + top_n: int = typer.Option(10, "--top", "-t", help="Number of top runners to show"), + json_out: bool = typer.Option(False, "--json", help="Output in JSON format"), + csv_out: bool = typer.Option(False, "--csv", help="Output in CSV format"), + date: str = typer.Option(None, "--date", help="Date for backtest (YYYY-MM-DD)"), + live_loop: bool = typer.Option(False, "--stream", "-s", help="Run in a continuous live loop") +): + """Catch the daily runners with momentum scoring.""" + provider = YFinanceRunnersProvider() + news_repo = SQLAlchemyNewsRepository() + scoring_engine = MomentumScoringEngine(news_repository=news_repo) + persistence = SQLAlchemyRunnersPersistence() + + service = DefaultRunnersService(provider, scoring_engine, persistence) + + if mode == "backtest": + if not date: + rprint("[red]Date is required for backtest mode.[/red]") + return + + bt_engine = MomentumBacktestEngine(news_repository=news_repo) + bt_date = datetime.strptime(date, '%Y-%m-%d') + + # For backtest, we might need a list of tickers that were runners on that day. + # Since we don't have that easily, we check if we have history in DB for that day + # or ask for tickers. + history = persistence.get_history(start_date=bt_date) + tickers = list(set([h.ticker for h in history])) + + if not tickers: + # Fallback: let the user provide tickers or use a sample for demo + rprint("[yellow]No historical data found in DB for this date. 
Provide tickers to backtest or scan live first.[/yellow]") + ticker_input = typer.prompt("Enter tickers to backtest (comma separated)", default="NVDA,TSLA,AMD,AAPL") + tickers = [t.strip().upper() for t in ticker_input.split(",")] + + with Progress( + SpinnerColumn(), + TextColumn("[progress.description]{task.description}"), + transient=True, + ) as progress: + progress.add_task(description=f"Running backtest for {date}...", total=None) + results = bt_engine.run_backtest(tickers, bt_date) + + if "error" in results: + rprint(f"[red]Backtest failed: {results['error']}[/red]") + return + + rprint(Panel(f"[bold blue]Backtest Results for {date}[/bold blue]")) + rprint(f"Tickers analyzed: {results['tickers_count']}") + rprint(f"Win Rate: [bold]{results['win_rate']:.2%}[/bold]") + rprint(f"Avg Return: [bold]{results['avg_return']:.2%}[/bold]") + rprint(f"Avg Extension: [bold]{results['avg_extension']:.2%}[/bold]") + + if results.get('catalyst_metrics'): + cm = results['catalyst_metrics'] + rprint(Panel( + f"Tickers with Catalyst: {cm['count']}\n" + f"Win Rate: [bold]{cm['win_rate']:.2%}[/bold]\n" + f"Avg Return: [bold]{cm['avg_return']:.2%}[/bold]", + title="[bold green]Catalyst Performance[/bold green]" + )) + + table = Table(title="Backtest Details", show_header=True) + table.add_column("Ticker") + table.add_column("Max Extension") + table.add_column("Max Drawdown") + table.add_column("Return") + table.add_column("Catalyst") + table.add_column("Result") + + for r in results['details']: + res_color = "green" if r['return'] > 0 else "red" + cat_str = "[green]YES[/green]" if r['has_catalyst'] else "[dim]NO[/dim]" + table.add_row( + r['ticker'], + f"{r['extension']:.2%}", + f"{r['max_drawdown']:.2%}", + f"[{res_color}]{r['return']:+.2%}[/{res_color}]", + cat_str, + "[green]WIN[/green]" if r['return'] > 0 else "[red]LOSS[/red]" + ) + console.print(table) + return + + if live_loop: + import asyncio + from rich.live import Live + + def generate_table(snapshot): + table = 
Table(title=f"Live Momentum Scanner - {snapshot.timestamp.strftime('%H:%M:%S')}", show_header=True, header_style="bold green") + table.add_column("Ticker", style="cyan") + table.add_column("Score", justify="right", style="bold") + table.add_column("Class", justify="center") + table.add_column("Price", justify="right") + table.add_column("% Chg", justify="right") + table.add_column("RelVol", justify="right") + table.add_column("VWAP Dist", justify="right") + + for item in snapshot.runners: + score_color = "green" if item.score >= 70 else "yellow" if item.score >= 40 else "white" + class_color = "bold green" if item.classification == "Strong Runner" else "yellow" if item.classification == "Developing Runner" else "dim" + table.add_row( + item.ticker, f"[{score_color}]{item.score:.1f}[/{score_color}]", + f"[{class_color}]{item.classification}[/{class_color}]", + f"${item.price:.2f}", f"{item.pct_change:+.2f}%", + f"{item.relative_volume:.2f}x", f"{item.vwap_dist:+.2f}%" + ) + return table + + async def run_loop(): + with Live(generate_table(service.get_runners()), refresh_per_second=1) as live: + while True: + await asyncio.sleep(60) # Poll every 60s + snapshot = service.get_runners() + live.update(generate_table(snapshot)) + + try: + asyncio.run(run_loop()) + except KeyboardInterrupt: + rprint("\n[yellow]Live scan stopped.[/yellow]") + return + + with Progress( + SpinnerColumn(), + TextColumn("[progress.description]{task.description}"), + transient=True, + ) as progress: + progress.add_task(description=f"Scanning for {mode} runners...", total=None) + snapshot = service.get_runners(mode=mode, top_n=top_n) + + if json_out: + rprint(snapshot.model_dump_json(indent=2)) + return + + if csv_out: + import pandas as pd + df = pd.DataFrame([r.model_dump() for r in snapshot.runners]) + filename = f"runners_{datetime.now().strftime('%Y%m%d_%H%M%S')}.csv" + df.to_csv(filename, index=False) + rprint(f"[green]Exported to {filename}[/green]") + return + + if snapshot.runners: + 
table = Table(title=f"Top {mode.capitalize()} Runners", show_header=True, header_style="bold green") + table.add_column("Ticker", style="cyan") + table.add_column("Score", justify="right", style="bold") + table.add_column("Class", justify="center") + table.add_column("Price", justify="right") + table.add_column("% Chg", justify="right") + table.add_column("RelVol", justify="right") + table.add_column("Gap%", justify="right") + table.add_column("VWAP Dist", justify="right") + table.add_column("Vol Acc", justify="right") + + for item in snapshot.runners: + score_color = "green" if item.score >= 70 else "yellow" if item.score >= 40 else "white" + class_color = "bold green" if item.classification == "Strong Runner" else "yellow" if item.classification == "Developing Runner" else "dim" + + table.add_row( + item.ticker, + f"[{score_color}]{item.score:.1f}[/{score_color}]", + f"[{class_color}]{item.classification}[/{class_color}]", + f"${item.price:.2f}", + f"{item.pct_change:+.2f}%", + f"{item.relative_volume:.2f}x", + f"{item.gap_pct:+.2f}%", + f"{item.vwap_dist:+.2f}%", + f"{item.volume_acceleration:.2f}x" + ) + console.print(table) + rprint(f"\n[dim]Analyzed {snapshot.universe_size} stocks. Snapshot at {snapshot.timestamp.strftime('%H:%M:%S')}[/dim]") + else: + rprint("[yellow]No runners detected matching filters.[/yellow]") + +if __name__ == "__main__": + app() diff --git a/poetry.lock b/poetry.lock index b89fea4..0f83ea8 100644 --- a/poetry.lock +++ b/poetry.lock @@ -1,4 +1,4 @@ -# This file is automatically @generated by Poetry 2.1.3 and should not be changed by hand. +# This file is automatically @generated by Poetry 2.2.1 and should not be changed by hand. 
[[package]] name = "aiohappyeyeballs" @@ -170,6 +170,18 @@ files = [ frozenlist = ">=1.1.0" typing-extensions = {version = ">=4.2", markers = "python_version < \"3.13\""} +[[package]] +name = "annotated-doc" +version = "0.0.4" +description = "Document parameters, class attributes, return types, and variables inline, with Annotated." +optional = false +python-versions = ">=3.8" +groups = ["main"] +files = [ + {file = "annotated_doc-0.0.4-py3-none-any.whl", hash = "sha256:571ac1dc6991c450b25a9c2d84a3705e2ae7a53467b5d111c24fa8baabbed320"}, + {file = "annotated_doc-0.0.4.tar.gz", hash = "sha256:fbcda96e87e9c92ad167c2e53839e57503ecfda18804ea28102353485033faa4"}, +] + [[package]] name = "annotated-types" version = "0.7.0" @@ -627,12 +639,12 @@ version = "0.4.6" description = "Cross-platform colored terminal text." optional = false python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,!=3.4.*,!=3.5.*,!=3.6.*,>=2.7" -groups = ["main"] -markers = "platform_system == \"Windows\"" +groups = ["main", "dev"] files = [ {file = "colorama-0.4.6-py2.py3-none-any.whl", hash = "sha256:4f1d9991f5acc0ca119f9d443620b77f9d6b33703e51011c16baf57afb285fc6"}, {file = "colorama-0.4.6.tar.gz", hash = "sha256:08695f5cb7ed6e0531a20572697297273c47b8cae5a63ffc6d6ed5c201be6e44"}, ] +markers = {main = "platform_system == \"Windows\"", dev = "sys_platform == \"win32\""} [[package]] name = "cryptography" @@ -1497,6 +1509,74 @@ protobuf = ">=3.20.2,<4.21.1 || >4.21.1,<4.21.2 || >4.21.2,<4.21.3 || >4.21.3,<4 [package.extras] grpc = ["grpcio (>=1.44.0,<2.0.0)"] +[[package]] +name = "greenlet" +version = "3.3.1" +description = "Lightweight in-process concurrent programming" +optional = false +python-versions = ">=3.10" +groups = ["main"] +markers = "platform_machine == \"aarch64\" or platform_machine == \"ppc64le\" or platform_machine == \"x86_64\" or platform_machine == \"amd64\" or platform_machine == \"AMD64\" or platform_machine == \"win32\" or platform_machine == \"WIN32\"" +files = [ + {file = 
"greenlet-3.3.1-cp310-cp310-macosx_11_0_universal2.whl", hash = "sha256:04bee4775f40ecefcdaa9d115ab44736cd4b9c5fba733575bfe9379419582e13"}, + {file = "greenlet-3.3.1-cp310-cp310-manylinux_2_24_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:50e1457f4fed12a50e427988a07f0f9df53cf0ee8da23fab16e6732c2ec909d4"}, + {file = "greenlet-3.3.1-cp310-cp310-manylinux_2_24_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:070472cd156f0656f86f92e954591644e158fd65aa415ffbe2d44ca77656a8f5"}, + {file = "greenlet-3.3.1-cp310-cp310-manylinux_2_24_s390x.manylinux_2_28_s390x.whl", hash = "sha256:1108b61b06b5224656121c3c8ee8876161c491cbe74e5c519e0634c837cf93d5"}, + {file = "greenlet-3.3.1-cp310-cp310-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:3a300354f27dd86bae5fbf7002e6dd2b3255cd372e9242c933faf5e859b703fe"}, + {file = "greenlet-3.3.1-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:e84b51cbebf9ae573b5fbd15df88887815e3253fc000a7d0ff95170e8f7e9729"}, + {file = "greenlet-3.3.1-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:e0093bd1a06d899892427217f0ff2a3c8f306182b8c754336d32e2d587c131b4"}, + {file = "greenlet-3.3.1-cp310-cp310-win_amd64.whl", hash = "sha256:7932f5f57609b6a3b82cc11877709aa7a98e3308983ed93552a1c377069b20c8"}, + {file = "greenlet-3.3.1-cp311-cp311-macosx_11_0_universal2.whl", hash = "sha256:5fd23b9bc6d37b563211c6abbb1b3cab27db385a4449af5c32e932f93017080c"}, + {file = "greenlet-3.3.1-cp311-cp311-manylinux_2_24_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:09f51496a0bfbaa9d74d36a52d2580d1ef5ed4fdfcff0a73730abfbbbe1403dd"}, + {file = "greenlet-3.3.1-cp311-cp311-manylinux_2_24_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:cb0feb07fe6e6a74615ee62a880007d976cf739b6669cce95daa7373d4fc69c5"}, + {file = "greenlet-3.3.1-cp311-cp311-manylinux_2_24_s390x.manylinux_2_28_s390x.whl", hash = "sha256:67ea3fc73c8cd92f42467a72b75e8f05ed51a0e9b1d15398c913416f2dafd49f"}, + {file = 
"greenlet-3.3.1-cp311-cp311-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:39eda9ba259cc9801da05351eaa8576e9aa83eb9411e8f0c299e05d712a210f2"}, + {file = "greenlet-3.3.1-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:e2e7e882f83149f0a71ac822ebf156d902e7a5d22c9045e3e0d1daf59cee2cc9"}, + {file = "greenlet-3.3.1-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:80aa4d79eb5564f2e0a6144fcc744b5a37c56c4a92d60920720e99210d88db0f"}, + {file = "greenlet-3.3.1-cp311-cp311-win_amd64.whl", hash = "sha256:32e4ca9777c5addcbf42ff3915d99030d8e00173a56f80001fb3875998fe410b"}, + {file = "greenlet-3.3.1-cp311-cp311-win_arm64.whl", hash = "sha256:da19609432f353fed186cc1b85e9440db93d489f198b4bdf42ae19cc9d9ac9b4"}, + {file = "greenlet-3.3.1-cp312-cp312-macosx_11_0_universal2.whl", hash = "sha256:7e806ca53acf6d15a888405880766ec84721aa4181261cd11a457dfe9a7a4975"}, + {file = "greenlet-3.3.1-cp312-cp312-manylinux_2_24_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:d842c94b9155f1c9b3058036c24ffb8ff78b428414a19792b2380be9cecf4f36"}, + {file = "greenlet-3.3.1-cp312-cp312-manylinux_2_24_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:20fedaadd422fa02695f82093f9a98bad3dab5fcda793c658b945fcde2ab27ba"}, + {file = "greenlet-3.3.1-cp312-cp312-manylinux_2_24_s390x.manylinux_2_28_s390x.whl", hash = "sha256:c620051669fd04ac6b60ebc70478210119c56e2d5d5df848baec4312e260e4ca"}, + {file = "greenlet-3.3.1-cp312-cp312-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:14194f5f4305800ff329cbf02c5fcc88f01886cadd29941b807668a45f0d2336"}, + {file = "greenlet-3.3.1-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:7b2fe4150a0cf59f847a67db8c155ac36aed89080a6a639e9f16df5d6c6096f1"}, + {file = "greenlet-3.3.1-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:49f4ad195d45f4a66a0eb9c1ba4832bb380570d361912fa3554746830d332149"}, + {file = "greenlet-3.3.1-cp312-cp312-win_amd64.whl", hash = 
"sha256:cc98b9c4e4870fa983436afa999d4eb16b12872fab7071423d5262fa7120d57a"}, + {file = "greenlet-3.3.1-cp312-cp312-win_arm64.whl", hash = "sha256:bfb2d1763d777de5ee495c85309460f6fd8146e50ec9d0ae0183dbf6f0a829d1"}, + {file = "greenlet-3.3.1-cp313-cp313-macosx_11_0_universal2.whl", hash = "sha256:7ab327905cabb0622adca5971e488064e35115430cec2c35a50fd36e72a315b3"}, + {file = "greenlet-3.3.1-cp313-cp313-manylinux_2_24_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:65be2f026ca6a176f88fb935ee23c18333ccea97048076aef4db1ef5bc0713ac"}, + {file = "greenlet-3.3.1-cp313-cp313-manylinux_2_24_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:7a3ae05b3d225b4155bda56b072ceb09d05e974bc74be6c3fc15463cf69f33fd"}, + {file = "greenlet-3.3.1-cp313-cp313-manylinux_2_24_s390x.manylinux_2_28_s390x.whl", hash = "sha256:12184c61e5d64268a160226fb4818af4df02cfead8379d7f8b99a56c3a54ff3e"}, + {file = "greenlet-3.3.1-cp313-cp313-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:6423481193bbbe871313de5fd06a082f2649e7ce6e08015d2a76c1e9186ca5b3"}, + {file = "greenlet-3.3.1-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:33a956fe78bbbda82bfc95e128d61129b32d66bcf0a20a1f0c08aa4839ffa951"}, + {file = "greenlet-3.3.1-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:4b065d3284be43728dd280f6f9a13990b56470b81be20375a207cdc814a983f2"}, + {file = "greenlet-3.3.1-cp313-cp313-win_amd64.whl", hash = "sha256:27289986f4e5b0edec7b5a91063c109f0276abb09a7e9bdab08437525977c946"}, + {file = "greenlet-3.3.1-cp313-cp313-win_arm64.whl", hash = "sha256:2f080e028001c5273e0b42690eaf359aeef9cb1389da0f171ea51a5dc3c7608d"}, + {file = "greenlet-3.3.1-cp314-cp314-macosx_11_0_universal2.whl", hash = "sha256:bd59acd8529b372775cd0fcbc5f420ae20681c5b045ce25bd453ed8455ab99b5"}, + {file = "greenlet-3.3.1-cp314-cp314-manylinux_2_24_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:b31c05dd84ef6871dd47120386aed35323c944d86c3d91a17c4b8d23df62f15b"}, + {file = 
"greenlet-3.3.1-cp314-cp314-manylinux_2_24_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:02925a0bfffc41e542c70aa14c7eda3593e4d7e274bfcccca1827e6c0875902e"}, + {file = "greenlet-3.3.1-cp314-cp314-manylinux_2_24_s390x.manylinux_2_28_s390x.whl", hash = "sha256:3e0f3878ca3a3ff63ab4ea478585942b53df66ddde327b59ecb191b19dbbd62d"}, + {file = "greenlet-3.3.1-cp314-cp314-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:34a729e2e4e4ffe9ae2408d5ecaf12f944853f40ad724929b7585bca808a9d6f"}, + {file = "greenlet-3.3.1-cp314-cp314-musllinux_1_2_aarch64.whl", hash = "sha256:aec9ab04e82918e623415947921dea15851b152b822661cce3f8e4393c3df683"}, + {file = "greenlet-3.3.1-cp314-cp314-musllinux_1_2_x86_64.whl", hash = "sha256:71c767cf281a80d02b6c1bdc41c9468e1f5a494fb11bc8688c360524e273d7b1"}, + {file = "greenlet-3.3.1-cp314-cp314-win_amd64.whl", hash = "sha256:96aff77af063b607f2489473484e39a0bbae730f2ea90c9e5606c9b73c44174a"}, + {file = "greenlet-3.3.1-cp314-cp314-win_arm64.whl", hash = "sha256:b066e8b50e28b503f604fa538adc764a638b38cf8e81e025011d26e8a627fa79"}, + {file = "greenlet-3.3.1-cp314-cp314t-macosx_11_0_universal2.whl", hash = "sha256:3e63252943c921b90abb035ebe9de832c436401d9c45f262d80e2d06cc659242"}, + {file = "greenlet-3.3.1-cp314-cp314t-manylinux_2_24_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:76e39058e68eb125de10c92524573924e827927df5d3891fbc97bd55764a8774"}, + {file = "greenlet-3.3.1-cp314-cp314t-manylinux_2_24_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:c9f9d5e7a9310b7a2f416dd13d2e3fd8b42d803968ea580b7c0f322ccb389b97"}, + {file = "greenlet-3.3.1-cp314-cp314t-manylinux_2_24_s390x.manylinux_2_28_s390x.whl", hash = "sha256:4b9721549a95db96689458a1e0ae32412ca18776ed004463df3a9299c1b257ab"}, + {file = "greenlet-3.3.1-cp314-cp314t-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:92497c78adf3ac703b57f1e3813c2d874f27f71a178f9ea5887855da413cd6d2"}, + {file = "greenlet-3.3.1-cp314-cp314t-musllinux_1_2_aarch64.whl", hash = 
"sha256:ed6b402bc74d6557a705e197d47f9063733091ed6357b3de33619d8a8d93ac53"}, + {file = "greenlet-3.3.1-cp314-cp314t-musllinux_1_2_x86_64.whl", hash = "sha256:59913f1e5ada20fde795ba906916aea25d442abcc0593fba7e26c92b7ad76249"}, + {file = "greenlet-3.3.1-cp314-cp314t-win_amd64.whl", hash = "sha256:301860987846c24cb8964bdec0e31a96ad4a2a801b41b4ef40963c1b44f33451"}, + {file = "greenlet-3.3.1.tar.gz", hash = "sha256:41848f3230b58c08bb43dee542e74a2a2e34d3c59dc3076cec9151aeeedcae98"}, +] + +[package.extras] +docs = ["Sphinx", "furo"] +test = ["objgraph", "psutil", "setuptools"] + [[package]] name = "grpc-google-iam-v1" version = "0.14.3" @@ -1822,6 +1902,18 @@ perf = ["ipython"] test = ["flufl.flake8", "jaraco.test (>=5.4)", "packaging", "pyfakefs", "pytest (>=6,!=8.1.*)", "pytest-perf (>=0.9.2)"] type = ["mypy (<1.19) ; platform_python_implementation == \"PyPy\"", "pytest-mypy (>=1.0.1)"] +[[package]] +name = "iniconfig" +version = "2.3.0" +description = "brain-dead simple config-ini parsing" +optional = false +python-versions = ">=3.10" +groups = ["dev"] +files = [ + {file = "iniconfig-2.3.0-py3-none-any.whl", hash = "sha256:f631c04d2c48c52b84d0d0549c99ff3859c98df65b3101406327ecc7d53fbf12"}, + {file = "iniconfig-2.3.0.tar.gz", hash = "sha256:c76315c77db068650d49c5b56314774a7804df16fee4402c1f19d6d15d8c4730"}, +] + [[package]] name = "jinja2" version = "3.1.6" @@ -2182,6 +2274,30 @@ html-clean = ["lxml_html_clean"] html5 = ["html5lib"] htmlsoup = ["BeautifulSoup4"] +[[package]] +name = "markdown-it-py" +version = "4.0.0" +description = "Python port of markdown-it. Markdown parsing, done right!" 
+optional = false +python-versions = ">=3.10" +groups = ["main"] +files = [ + {file = "markdown_it_py-4.0.0-py3-none-any.whl", hash = "sha256:87327c59b172c5011896038353a81343b6754500a08cd7a4973bb48c6d578147"}, + {file = "markdown_it_py-4.0.0.tar.gz", hash = "sha256:cb0a2b4aa34f932c007117b194e945bd74e0ec24133ceb5bac59009cda1cb9f3"}, +] + +[package.dependencies] +mdurl = ">=0.1,<1.0" + +[package.extras] +benchmarking = ["psutil", "pytest", "pytest-benchmark"] +compare = ["commonmark (>=0.9,<1.0)", "markdown (>=3.4,<4.0)", "markdown-it-pyrs", "mistletoe (>=1.0,<2.0)", "mistune (>=3.0,<4.0)", "panflute (>=2.3,<3.0)"] +linkify = ["linkify-it-py (>=1,<3)"] +plugins = ["mdit-py-plugins (>=0.5.0)"] +profiling = ["gprof2dot"] +rtd = ["ipykernel", "jupyter_sphinx", "mdit-py-plugins (>=0.5.0)", "myst-parser", "pyyaml", "sphinx", "sphinx-book-theme (>=1.0,<2.0)", "sphinx-copybutton", "sphinx-design"] +testing = ["coverage", "pytest", "pytest-cov", "pytest-regressions", "requests"] + [[package]] name = "markupsafe" version = "3.0.3" @@ -2281,6 +2397,18 @@ files = [ {file = "markupsafe-3.0.3.tar.gz", hash = "sha256:722695808f4b6457b320fdc131280796bdceb04ab50fe1795cd540799ebe1698"}, ] +[[package]] +name = "mdurl" +version = "0.1.2" +description = "Markdown URL utilities" +optional = false +python-versions = ">=3.7" +groups = ["main"] +files = [ + {file = "mdurl-0.1.2-py3-none-any.whl", hash = "sha256:84008a41e51615a49fc9966191ff91509e3c40b939176e643fd50a5c2196b8f8"}, + {file = "mdurl-0.1.2.tar.gz", hash = "sha256:bb413d29f5eea38f31dd4754dd7377d4465116fb207585f97bf925588687c1ba"}, +] + [[package]] name = "multidict" version = "6.7.1" @@ -2564,7 +2692,7 @@ version = "26.0" description = "Core utilities for Python packages" optional = false python-versions = ">=3.8" -groups = ["main"] +groups = ["main", "dev"] files = [ {file = "packaging-26.0-py3-none-any.whl", hash = "sha256:b36f1fef9334a5588b4166f8bcd26a14e521f2b55e6b9de3aaa80d3ff7a37529"}, {file = "packaging-26.0.tar.gz", hash = 
"sha256:00243ae351a257117b6a241061796684b084ed1c516a08c48a3f7e147a9d80b4"}, @@ -2696,6 +2824,22 @@ docs = ["furo (>=2025.9.25)", "proselint (>=0.14)", "sphinx (>=8.2.3)", "sphinx- test = ["appdirs (==1.4.4)", "covdefaults (>=2.3)", "pytest (>=8.4.2)", "pytest-cov (>=7)", "pytest-mock (>=3.15.1)"] type = ["mypy (>=1.18.2)"] +[[package]] +name = "pluggy" +version = "1.6.0" +description = "plugin and hook calling mechanisms for python" +optional = false +python-versions = ">=3.9" +groups = ["dev"] +files = [ + {file = "pluggy-1.6.0-py3-none-any.whl", hash = "sha256:e920276dd6813095e9377c0bc5566d94c932c33b27a3e3945d8389c374dd4746"}, + {file = "pluggy-1.6.0.tar.gz", hash = "sha256:7dcc130b76258d33b90f61b658791dede3486c3e6bfb003ee5c9bfb396dd22f3"}, +] + +[package.extras] +dev = ["pre-commit", "tox"] +testing = ["coverage", "pytest", "pytest-benchmark"] + [[package]] name = "primp" version = "0.15.0" @@ -2888,6 +3032,83 @@ files = [ {file = "protobuf-6.33.5.tar.gz", hash = "sha256:6ddcac2a081f8b7b9642c09406bc6a4290128fce5f471cddd165960bb9119e5c"}, ] +[[package]] +name = "psycopg2-binary" +version = "2.9.11" +description = "psycopg2 - Python-PostgreSQL Database Adapter" +optional = false +python-versions = ">=3.9" +groups = ["main"] +files = [ + {file = "psycopg2-binary-2.9.11.tar.gz", hash = "sha256:b6aed9e096bf63f9e75edf2581aa9a7e7186d97ab5c177aa6c87797cd591236c"}, + {file = "psycopg2_binary-2.9.11-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:d6fe6b47d0b42ce1c9f1fa3e35bb365011ca22e39db37074458f27921dca40f2"}, + {file = "psycopg2_binary-2.9.11-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:a6c0e4262e089516603a09474ee13eabf09cb65c332277e39af68f6233911087"}, + {file = "psycopg2_binary-2.9.11-cp310-cp310-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:c47676e5b485393f069b4d7a811267d3168ce46f988fa602658b8bb901e9e64d"}, + {file = "psycopg2_binary-2.9.11-cp310-cp310-manylinux2014_ppc64le.manylinux_2_17_ppc64le.whl", hash = 
"sha256:a28d8c01a7b27a1e3265b11250ba7557e5f72b5ee9e5f3a2fa8d2949c29bf5d2"}, + {file = "psycopg2_binary-2.9.11-cp310-cp310-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:5f3f2732cf504a1aa9e9609d02f79bea1067d99edf844ab92c247bbca143303b"}, + {file = "psycopg2_binary-2.9.11-cp310-cp310-manylinux_2_38_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:865f9945ed1b3950d968ec4690ce68c55019d79e4497366d36e090327ce7db14"}, + {file = "psycopg2_binary-2.9.11-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:91537a8df2bde69b1c1db01d6d944c831ca793952e4f57892600e96cee95f2cd"}, + {file = "psycopg2_binary-2.9.11-cp310-cp310-musllinux_1_2_ppc64le.whl", hash = "sha256:4dca1f356a67ecb68c81a7bc7809f1569ad9e152ce7fd02c2f2036862ca9f66b"}, + {file = "psycopg2_binary-2.9.11-cp310-cp310-musllinux_1_2_riscv64.whl", hash = "sha256:0da4de5c1ac69d94ed4364b6cbe7190c1a70d325f112ba783d83f8440285f152"}, + {file = "psycopg2_binary-2.9.11-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:37d8412565a7267f7d79e29ab66876e55cb5e8e7b3bbf94f8206f6795f8f7e7e"}, + {file = "psycopg2_binary-2.9.11-cp310-cp310-win_amd64.whl", hash = "sha256:c665f01ec8ab273a61c62beeb8cce3014c214429ced8a308ca1fc410ecac3a39"}, + {file = "psycopg2_binary-2.9.11-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:0e8480afd62362d0a6a27dd09e4ca2def6fa50ed3a4e7c09165266106b2ffa10"}, + {file = "psycopg2_binary-2.9.11-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:763c93ef1df3da6d1a90f86ea7f3f806dc06b21c198fa87c3c25504abec9404a"}, + {file = "psycopg2_binary-2.9.11-cp311-cp311-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:2e164359396576a3cc701ba8af4751ae68a07235d7a380c631184a611220d9a4"}, + {file = "psycopg2_binary-2.9.11-cp311-cp311-manylinux2014_ppc64le.manylinux_2_17_ppc64le.whl", hash = "sha256:d57c9c387660b8893093459738b6abddbb30a7eab058b77b0d0d1c7d521ddfd7"}, + {file = "psycopg2_binary-2.9.11-cp311-cp311-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = 
"sha256:2c226ef95eb2250974bf6fa7a842082b31f68385c4f3268370e3f3870e7859ee"}, + {file = "psycopg2_binary-2.9.11-cp311-cp311-manylinux_2_38_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:a311f1edc9967723d3511ea7d2708e2c3592e3405677bf53d5c7246753591fbb"}, + {file = "psycopg2_binary-2.9.11-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:ebb415404821b6d1c47353ebe9c8645967a5235e6d88f914147e7fd411419e6f"}, + {file = "psycopg2_binary-2.9.11-cp311-cp311-musllinux_1_2_ppc64le.whl", hash = "sha256:f07c9c4a5093258a03b28fab9b4f151aa376989e7f35f855088234e656ee6a94"}, + {file = "psycopg2_binary-2.9.11-cp311-cp311-musllinux_1_2_riscv64.whl", hash = "sha256:00ce1830d971f43b667abe4a56e42c1e2d594b32da4802e44a73bacacb25535f"}, + {file = "psycopg2_binary-2.9.11-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:cffe9d7697ae7456649617e8bb8d7a45afb71cd13f7ab22af3e5c61f04840908"}, + {file = "psycopg2_binary-2.9.11-cp311-cp311-win_amd64.whl", hash = "sha256:304fd7b7f97eef30e91b8f7e720b3db75fee010b520e434ea35ed1ff22501d03"}, + {file = "psycopg2_binary-2.9.11-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:be9b840ac0525a283a96b556616f5b4820e0526addb8dcf6525a0fa162730be4"}, + {file = "psycopg2_binary-2.9.11-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:f090b7ddd13ca842ebfe301cd587a76a4cf0913b1e429eb92c1be5dbeb1a19bc"}, + {file = "psycopg2_binary-2.9.11-cp312-cp312-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:ab8905b5dcb05bf3fb22e0cf90e10f469563486ffb6a96569e51f897c750a76a"}, + {file = "psycopg2_binary-2.9.11-cp312-cp312-manylinux2014_ppc64le.manylinux_2_17_ppc64le.whl", hash = "sha256:bf940cd7e7fec19181fdbc29d76911741153d51cab52e5c21165f3262125685e"}, + {file = "psycopg2_binary-2.9.11-cp312-cp312-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:fa0f693d3c68ae925966f0b14b8edda71696608039f4ed61b1fe9ffa468d16db"}, + {file = "psycopg2_binary-2.9.11-cp312-cp312-manylinux_2_38_riscv64.manylinux_2_39_riscv64.whl", hash = 
"sha256:a1cf393f1cdaf6a9b57c0a719a1068ba1069f022a59b8b1fe44b006745b59757"}, + {file = "psycopg2_binary-2.9.11-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:ef7a6beb4beaa62f88592ccc65df20328029d721db309cb3250b0aae0fa146c3"}, + {file = "psycopg2_binary-2.9.11-cp312-cp312-musllinux_1_2_ppc64le.whl", hash = "sha256:31b32c457a6025e74d233957cc9736742ac5a6cb196c6b68499f6bb51390bd6a"}, + {file = "psycopg2_binary-2.9.11-cp312-cp312-musllinux_1_2_riscv64.whl", hash = "sha256:edcb3aeb11cb4bf13a2af3c53a15b3d612edeb6409047ea0b5d6a21a9d744b34"}, + {file = "psycopg2_binary-2.9.11-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:62b6d93d7c0b61a1dd6197d208ab613eb7dcfdcca0a49c42ceb082257991de9d"}, + {file = "psycopg2_binary-2.9.11-cp312-cp312-win_amd64.whl", hash = "sha256:b33fabeb1fde21180479b2d4667e994de7bbf0eec22832ba5d9b5e4cf65b6c6d"}, + {file = "psycopg2_binary-2.9.11-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:b8fb3db325435d34235b044b199e56cdf9ff41223a4b9752e8576465170bb38c"}, + {file = "psycopg2_binary-2.9.11-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:366df99e710a2acd90efed3764bb1e28df6c675d33a7fb40df9b7281694432ee"}, + {file = "psycopg2_binary-2.9.11-cp313-cp313-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:8c55b385daa2f92cb64b12ec4536c66954ac53654c7f15a203578da4e78105c0"}, + {file = "psycopg2_binary-2.9.11-cp313-cp313-manylinux2014_ppc64le.manylinux_2_17_ppc64le.whl", hash = "sha256:c0377174bf1dd416993d16edc15357f6eb17ac998244cca19bc67cdc0e2e5766"}, + {file = "psycopg2_binary-2.9.11-cp313-cp313-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:5c6ff3335ce08c75afaed19e08699e8aacf95d4a260b495a4a8545244fe2ceb3"}, + {file = "psycopg2_binary-2.9.11-cp313-cp313-manylinux_2_38_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:84011ba3109e06ac412f95399b704d3d6950e386b7994475b231cf61eec2fc1f"}, + {file = "psycopg2_binary-2.9.11-cp313-cp313-musllinux_1_2_aarch64.whl", hash = 
"sha256:ba34475ceb08cccbdd98f6b46916917ae6eeb92b5ae111df10b544c3a4621dc4"}, + {file = "psycopg2_binary-2.9.11-cp313-cp313-musllinux_1_2_ppc64le.whl", hash = "sha256:b31e90fdd0f968c2de3b26ab014314fe814225b6c324f770952f7d38abf17e3c"}, + {file = "psycopg2_binary-2.9.11-cp313-cp313-musllinux_1_2_riscv64.whl", hash = "sha256:d526864e0f67f74937a8fce859bd56c979f5e2ec57ca7c627f5f1071ef7fee60"}, + {file = "psycopg2_binary-2.9.11-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:04195548662fa544626c8ea0f06561eb6203f1984ba5b4562764fbeb4c3d14b1"}, + {file = "psycopg2_binary-2.9.11-cp313-cp313-win_amd64.whl", hash = "sha256:efff12b432179443f54e230fdf60de1f6cc726b6c832db8701227d089310e8aa"}, + {file = "psycopg2_binary-2.9.11-cp314-cp314-macosx_10_13_x86_64.whl", hash = "sha256:92e3b669236327083a2e33ccfa0d320dd01b9803b3e14dd986a4fc54aa00f4e1"}, + {file = "psycopg2_binary-2.9.11-cp314-cp314-macosx_11_0_arm64.whl", hash = "sha256:e0deeb03da539fa3577fcb0b3f2554a97f7e5477c246098dbb18091a4a01c16f"}, + {file = "psycopg2_binary-2.9.11-cp314-cp314-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:9b52a3f9bb540a3e4ec0f6ba6d31339727b2950c9772850d6545b7eae0b9d7c5"}, + {file = "psycopg2_binary-2.9.11-cp314-cp314-manylinux2014_ppc64le.manylinux_2_17_ppc64le.whl", hash = "sha256:db4fd476874ccfdbb630a54426964959e58da4c61c9feba73e6094d51303d7d8"}, + {file = "psycopg2_binary-2.9.11-cp314-cp314-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:47f212c1d3be608a12937cc131bd85502954398aaa1320cb4c14421a0ffccf4c"}, + {file = "psycopg2_binary-2.9.11-cp314-cp314-manylinux_2_38_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:e35b7abae2b0adab776add56111df1735ccc71406e56203515e228a8dc07089f"}, + {file = "psycopg2_binary-2.9.11-cp314-cp314-musllinux_1_2_aarch64.whl", hash = "sha256:fcf21be3ce5f5659daefd2b3b3b6e4727b028221ddc94e6c1523425579664747"}, + {file = "psycopg2_binary-2.9.11-cp314-cp314-musllinux_1_2_ppc64le.whl", hash = 
"sha256:9bd81e64e8de111237737b29d68039b9c813bdf520156af36d26819c9a979e5f"}, + {file = "psycopg2_binary-2.9.11-cp314-cp314-musllinux_1_2_riscv64.whl", hash = "sha256:32770a4d666fbdafab017086655bcddab791d7cb260a16679cc5a7338b64343b"}, + {file = "psycopg2_binary-2.9.11-cp314-cp314-musllinux_1_2_x86_64.whl", hash = "sha256:c3cb3a676873d7506825221045bd70e0427c905b9c8ee8d6acd70cfcbd6e576d"}, + {file = "psycopg2_binary-2.9.11-cp314-cp314-win_amd64.whl", hash = "sha256:4012c9c954dfaccd28f94e84ab9f94e12df76b4afb22331b1f0d3154893a6316"}, + {file = "psycopg2_binary-2.9.11-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:20e7fb94e20b03dcc783f76c0865f9da39559dcc0c28dd1a3fce0d01902a6b9c"}, + {file = "psycopg2_binary-2.9.11-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:4bdab48575b6f870f465b397c38f1b415520e9879fdf10a53ee4f49dcbdf8a21"}, + {file = "psycopg2_binary-2.9.11-cp39-cp39-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:9d3a9edcfbe77a3ed4bc72836d466dfce4174beb79eda79ea155cc77237ed9e8"}, + {file = "psycopg2_binary-2.9.11-cp39-cp39-manylinux2014_ppc64le.manylinux_2_17_ppc64le.whl", hash = "sha256:44fc5c2b8fa871ce7f0023f619f1349a0aa03a0857f2c96fbc01c657dcbbdb49"}, + {file = "psycopg2_binary-2.9.11-cp39-cp39-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:9c55460033867b4622cda1b6872edf445809535144152e5d14941ef591980edf"}, + {file = "psycopg2_binary-2.9.11-cp39-cp39-manylinux_2_38_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:2d11098a83cca92deaeaed3d58cfd150d49b3b06ee0d0852be466bf87596899e"}, + {file = "psycopg2_binary-2.9.11-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:691c807d94aecfbc76a14e1408847d59ff5b5906a04a23e12a89007672b9e819"}, + {file = "psycopg2_binary-2.9.11-cp39-cp39-musllinux_1_2_ppc64le.whl", hash = "sha256:8b81627b691f29c4c30a8f322546ad039c40c328373b11dff7490a3e1b517855"}, + {file = "psycopg2_binary-2.9.11-cp39-cp39-musllinux_1_2_riscv64.whl", hash = 
"sha256:b637d6d941209e8d96a072d7977238eea128046effbf37d1d8b2c0764750017d"}, + {file = "psycopg2_binary-2.9.11-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:41360b01c140c2a03d346cec3280cf8a71aa07d94f3b1509fa0161c366af66b4"}, + {file = "psycopg2_binary-2.9.11-cp39-cp39-win_amd64.whl", hash = "sha256:875039274f8a2361e5207857899706da840768e2a775bf8c65e82f60b197df02"}, +] + [[package]] name = "pyasn1" version = "0.6.2" @@ -3084,6 +3305,43 @@ files = [ [package.dependencies] typing-extensions = ">=4.14.1" +[[package]] +name = "pygments" +version = "2.19.2" +description = "Pygments is a syntax highlighting package written in Python." +optional = false +python-versions = ">=3.8" +groups = ["main", "dev"] +files = [ + {file = "pygments-2.19.2-py3-none-any.whl", hash = "sha256:86540386c03d588bb81d44bc3928634ff26449851e99741617ecb9037ee5ec0b"}, + {file = "pygments-2.19.2.tar.gz", hash = "sha256:636cb2477cec7f8952536970bc533bc43743542f70392ae026374600add5b887"}, +] + +[package.extras] +windows-terminal = ["colorama (>=0.4.6)"] + +[[package]] +name = "pytest" +version = "9.0.2" +description = "pytest: simple powerful testing with Python" +optional = false +python-versions = ">=3.10" +groups = ["dev"] +files = [ + {file = "pytest-9.0.2-py3-none-any.whl", hash = "sha256:711ffd45bf766d5264d487b917733b453d917afd2b0ad65223959f59089f875b"}, + {file = "pytest-9.0.2.tar.gz", hash = "sha256:75186651a92bd89611d1d9fc20f0b4345fd827c41ccd5c299a868a05d70edf11"}, +] + +[package.dependencies] +colorama = {version = ">=0.4", markers = "sys_platform == \"win32\""} +iniconfig = ">=1.0.1" +packaging = ">=22" +pluggy = ">=1.5,<2" +pygments = ">=2.7.2" + +[package.extras] +dev = ["argcomplete", "attrs (>=19.2)", "hypothesis (>=3.56)", "mock", "requests", "setuptools", "xmlschema"] + [[package]] name = "python-dateutil" version = "2.9.0.post0" @@ -3389,6 +3647,25 @@ urllib3 = ">=1.21.1,<3" socks = ["PySocks (>=1.5.6,!=1.5.7)"] use-chardet-on-py3 = ["chardet (>=3.0.2,<6)"] +[[package]] +name = 
"rich" +version = "13.9.4" +description = "Render rich text, tables, progress bars, syntax highlighting, markdown and more to the terminal" +optional = false +python-versions = ">=3.8.0" +groups = ["main"] +files = [ + {file = "rich-13.9.4-py3-none-any.whl", hash = "sha256:6049d5e6ec054bf2779ab3358186963bac2ea89175919d699e378b99738c2a90"}, + {file = "rich-13.9.4.tar.gz", hash = "sha256:439594978a49a09530cff7ebc4b5c7103ef57baf48d5ea3184f21d9a2befa098"}, +] + +[package.dependencies] +markdown-it-py = ">=2.2.0" +pygments = ">=2.13.0,<3.0.0" + +[package.extras] +jupyter = ["ipywidgets (>=7.5.1,<9)"] + [[package]] name = "rpds-py" version = "0.30.0" @@ -3674,6 +3951,104 @@ files = [ {file = "soupsieve-2.8.3.tar.gz", hash = "sha256:3267f1eeea4251fb42728b6dfb746edc9acaffc4a45b27e19450b676586e8349"}, ] +[[package]] +name = "sqlalchemy" +version = "2.0.46" +description = "Database Abstraction Library" +optional = false +python-versions = ">=3.7" +groups = ["main"] +files = [ + {file = "sqlalchemy-2.0.46-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:895296687ad06dc9b11a024cf68e8d9d3943aa0b4964278d2553b86f1b267735"}, + {file = "sqlalchemy-2.0.46-cp310-cp310-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:ab65cb2885a9f80f979b85aa4e9c9165a31381ca322cbde7c638fe6eefd1ec39"}, + {file = "sqlalchemy-2.0.46-cp310-cp310-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:52fe29b3817bd191cc20bad564237c808967972c97fa683c04b28ec8979ae36f"}, + {file = "sqlalchemy-2.0.46-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:09168817d6c19954d3b7655da6ba87fcb3a62bb575fb396a81a8b6a9fadfe8b5"}, + {file = "sqlalchemy-2.0.46-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:be6c0466b4c25b44c5d82b0426b5501de3c424d7a3220e86cd32f319ba56798e"}, + {file = "sqlalchemy-2.0.46-cp310-cp310-win32.whl", hash = "sha256:1bc3f601f0a818d27bfe139f6766487d9c88502062a2cd3a7ee6c342e81d5047"}, + {file = 
"sqlalchemy-2.0.46-cp310-cp310-win_amd64.whl", hash = "sha256:e0c05aff5c6b1bb5fb46a87e0f9d2f733f83ef6cbbbcd5c642b6c01678268061"}, + {file = "sqlalchemy-2.0.46-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:261c4b1f101b4a411154f1da2b76497d73abbfc42740029205d4d01fa1052684"}, + {file = "sqlalchemy-2.0.46-cp311-cp311-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:181903fe8c1b9082995325f1b2e84ac078b1189e2819380c2303a5f90e114a62"}, + {file = "sqlalchemy-2.0.46-cp311-cp311-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:590be24e20e2424a4c3c1b0835e9405fa3d0af5823a1a9fc02e5dff56471515f"}, + {file = "sqlalchemy-2.0.46-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:7568fe771f974abadce52669ef3a03150ff03186d8eb82613bc8adc435a03f01"}, + {file = "sqlalchemy-2.0.46-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:ebf7e1e78af38047e08836d33502c7a278915698b7c2145d045f780201679999"}, + {file = "sqlalchemy-2.0.46-cp311-cp311-win32.whl", hash = "sha256:9d80ea2ac519c364a7286e8d765d6cd08648f5b21ca855a8017d9871f075542d"}, + {file = "sqlalchemy-2.0.46-cp311-cp311-win_amd64.whl", hash = "sha256:585af6afe518732d9ccd3aea33af2edaae4a7aa881af5d8f6f4fe3a368699597"}, + {file = "sqlalchemy-2.0.46-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:3a9a72b0da8387f15d5810f1facca8f879de9b85af8c645138cba61ea147968c"}, + {file = "sqlalchemy-2.0.46-cp312-cp312-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:2347c3f0efc4de367ba00218e0ae5c4ba2306e47216ef80d6e31761ac97cb0b9"}, + {file = "sqlalchemy-2.0.46-cp312-cp312-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:9094c8b3197db12aa6f05c51c05daaad0a92b8c9af5388569847b03b1007fb1b"}, + {file = "sqlalchemy-2.0.46-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:37fee2164cf21417478b6a906adc1a91d69ae9aba8f9533e67ce882f4bb1de53"}, + {file = 
"sqlalchemy-2.0.46-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:b1e14b2f6965a685c7128bd315e27387205429c2e339eeec55cb75ca4ab0ea2e"}, + {file = "sqlalchemy-2.0.46-cp312-cp312-win32.whl", hash = "sha256:412f26bb4ba942d52016edc8d12fb15d91d3cd46b0047ba46e424213ad407bcb"}, + {file = "sqlalchemy-2.0.46-cp312-cp312-win_amd64.whl", hash = "sha256:ea3cd46b6713a10216323cda3333514944e510aa691c945334713fca6b5279ff"}, + {file = "sqlalchemy-2.0.46-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:93a12da97cca70cea10d4b4fc602589c4511f96c1f8f6c11817620c021d21d00"}, + {file = "sqlalchemy-2.0.46-cp313-cp313-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:af865c18752d416798dae13f83f38927c52f085c52e2f32b8ab0fef46fdd02c2"}, + {file = "sqlalchemy-2.0.46-cp313-cp313-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:8d679b5f318423eacb61f933a9a0f75535bfca7056daeadbf6bd5bcee6183aee"}, + {file = "sqlalchemy-2.0.46-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:64901e08c33462acc9ec3bad27fc7a5c2b6491665f2aa57564e57a4f5d7c52ad"}, + {file = "sqlalchemy-2.0.46-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:e8ac45e8f4eaac0f9f8043ea0e224158855c6a4329fd4ee37c45c61e3beb518e"}, + {file = "sqlalchemy-2.0.46-cp313-cp313-win32.whl", hash = "sha256:8d3b44b3d0ab2f1319d71d9863d76eeb46766f8cf9e921ac293511804d39813f"}, + {file = "sqlalchemy-2.0.46-cp313-cp313-win_amd64.whl", hash = "sha256:77f8071d8fbcbb2dd11b7fd40dedd04e8ebe2eb80497916efedba844298065ef"}, + {file = "sqlalchemy-2.0.46-cp313-cp313t-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:a1e8cc6cc01da346dc92d9509a63033b9b1bda4fed7a7a7807ed385c7dccdc10"}, + {file = "sqlalchemy-2.0.46-cp313-cp313t-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:96c7cca1a4babaaf3bfff3e4e606e38578856917e52f0384635a95b226c87764"}, + {file = 
"sqlalchemy-2.0.46-cp313-cp313t-musllinux_1_2_aarch64.whl", hash = "sha256:b2a9f9aee38039cf4755891a1e50e1effcc42ea6ba053743f452c372c3152b1b"}, + {file = "sqlalchemy-2.0.46-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:db23b1bf8cfe1f7fda19018e7207b20cdb5168f83c437ff7e95d19e39289c447"}, + {file = "sqlalchemy-2.0.46-cp314-cp314-macosx_11_0_arm64.whl", hash = "sha256:56bdd261bfd0895452006d5316cbf35739c53b9bb71a170a331fa0ea560b2ada"}, + {file = "sqlalchemy-2.0.46-cp314-cp314-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:33e462154edb9493f6c3ad2125931e273bbd0be8ae53f3ecd1c161ea9a1dd366"}, + {file = "sqlalchemy-2.0.46-cp314-cp314-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:9bcdce05f056622a632f1d44bb47dbdb677f58cad393612280406ce37530eb6d"}, + {file = "sqlalchemy-2.0.46-cp314-cp314-musllinux_1_2_aarch64.whl", hash = "sha256:8e84b09a9b0f19accedcbeff5c2caf36e0dd537341a33aad8d680336152dc34e"}, + {file = "sqlalchemy-2.0.46-cp314-cp314-musllinux_1_2_x86_64.whl", hash = "sha256:4f52f7291a92381e9b4de9050b0a65ce5d6a763333406861e33906b8aa4906bf"}, + {file = "sqlalchemy-2.0.46-cp314-cp314-win32.whl", hash = "sha256:70ed2830b169a9960193f4d4322d22be5c0925357d82cbf485b3369893350908"}, + {file = "sqlalchemy-2.0.46-cp314-cp314-win_amd64.whl", hash = "sha256:3c32e993bc57be6d177f7d5d31edb93f30726d798ad86ff9066d75d9bf2e0b6b"}, + {file = "sqlalchemy-2.0.46-cp314-cp314t-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:4dafb537740eef640c4d6a7c254611dca2df87eaf6d14d6a5fca9d1f4c3fc0fa"}, + {file = "sqlalchemy-2.0.46-cp314-cp314t-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:42a1643dc5427b69aca967dae540a90b0fbf57eaf248f13a90ea5930e0966863"}, + {file = "sqlalchemy-2.0.46-cp314-cp314t-musllinux_1_2_aarch64.whl", hash = "sha256:ff33c6e6ad006bbc0f34f5faf941cfc62c45841c64c0a058ac38c799f15b5ede"}, + {file = 
"sqlalchemy-2.0.46-cp314-cp314t-musllinux_1_2_x86_64.whl", hash = "sha256:82ec52100ec1e6ec671563bbd02d7c7c8d0b9e71a0723c72f22ecf52d1755330"}, + {file = "sqlalchemy-2.0.46-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:6ac245604295b521de49b465bab845e3afe6916bcb2147e5929c8041b4ec0545"}, + {file = "sqlalchemy-2.0.46-cp38-cp38-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:1e6199143d51e3e1168bedd98cc698397404a8f7508831b81b6a29b18b051069"}, + {file = "sqlalchemy-2.0.46-cp38-cp38-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:716be5bcabf327b6d5d265dbdc6213a01199be587224eb991ad0d37e83d728fd"}, + {file = "sqlalchemy-2.0.46-cp38-cp38-musllinux_1_2_aarch64.whl", hash = "sha256:6f827fd687fa1ba7f51699e1132129eac8db8003695513fcf13fc587e1bd47a5"}, + {file = "sqlalchemy-2.0.46-cp38-cp38-musllinux_1_2_x86_64.whl", hash = "sha256:c805fa6e5d461329fa02f53f88c914d189ea771b6821083937e79550bf31fc19"}, + {file = "sqlalchemy-2.0.46-cp38-cp38-win32.whl", hash = "sha256:3aac08f7546179889c62b53b18ebf1148b10244b3405569c93984b0388d016a7"}, + {file = "sqlalchemy-2.0.46-cp38-cp38-win_amd64.whl", hash = "sha256:0cc3117db526cad3e61074100bd2867b533e2c7dc1569e95c14089735d6fb4fe"}, + {file = "sqlalchemy-2.0.46-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:90bde6c6b1827565a95fde597da001212ab436f1b2e0c2dcc7246e14db26e2a3"}, + {file = "sqlalchemy-2.0.46-cp39-cp39-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:94b1e5f3a5f1ff4f42d5daab047428cd45a3380e51e191360a35cef71c9a7a2a"}, + {file = "sqlalchemy-2.0.46-cp39-cp39-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:93bb0aae40b52c57fd74ef9c6933c08c040ba98daf23ad33c3f9893494b8d3ce"}, + {file = "sqlalchemy-2.0.46-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:c4e2cc868b7b5208aec6c960950b7bb821f82c2fe66446c92ee0a571765e91a5"}, + {file = "sqlalchemy-2.0.46-cp39-cp39-musllinux_1_2_x86_64.whl", 
hash = "sha256:965c62be8256d10c11f8907e7a8d3e18127a4c527a5919d85fa87fd9ecc2cfdc"}, + {file = "sqlalchemy-2.0.46-cp39-cp39-win32.whl", hash = "sha256:9397b381dcee8a2d6b99447ae85ea2530dcac82ca494d1db877087a13e38926d"}, + {file = "sqlalchemy-2.0.46-cp39-cp39-win_amd64.whl", hash = "sha256:4396c948d8217e83e2c202fbdcc0389cf8c93d2c1c5e60fa5c5a955eae0e64be"}, + {file = "sqlalchemy-2.0.46-py3-none-any.whl", hash = "sha256:f9c11766e7e7c0a2767dda5acb006a118640c9fc0a4104214b96269bfb78399e"}, + {file = "sqlalchemy-2.0.46.tar.gz", hash = "sha256:cf36851ee7219c170bb0793dbc3da3e80c582e04a5437bc601bfe8c85c9216d7"}, +] + +[package.dependencies] +greenlet = {version = ">=1", markers = "platform_machine == \"aarch64\" or platform_machine == \"ppc64le\" or platform_machine == \"x86_64\" or platform_machine == \"amd64\" or platform_machine == \"AMD64\" or platform_machine == \"win32\" or platform_machine == \"WIN32\""} +typing-extensions = ">=4.6.0" + +[package.extras] +aiomysql = ["aiomysql (>=0.2.0)", "greenlet (>=1)"] +aioodbc = ["aioodbc", "greenlet (>=1)"] +aiosqlite = ["aiosqlite", "greenlet (>=1)", "typing_extensions (!=3.10.0.1)"] +asyncio = ["greenlet (>=1)"] +asyncmy = ["asyncmy (>=0.2.3,!=0.2.4,!=0.2.6)", "greenlet (>=1)"] +mariadb-connector = ["mariadb (>=1.0.1,!=1.1.2,!=1.1.5,!=1.1.10)"] +mssql = ["pyodbc"] +mssql-pymssql = ["pymssql"] +mssql-pyodbc = ["pyodbc"] +mypy = ["mypy (>=0.910)"] +mysql = ["mysqlclient (>=1.4.0)"] +mysql-connector = ["mysql-connector-python"] +oracle = ["cx_oracle (>=8)"] +oracle-oracledb = ["oracledb (>=1.0.1)"] +postgresql = ["psycopg2 (>=2.7)"] +postgresql-asyncpg = ["asyncpg", "greenlet (>=1)"] +postgresql-pg8000 = ["pg8000 (>=1.29.1)"] +postgresql-psycopg = ["psycopg (>=3.0.7)"] +postgresql-psycopg2binary = ["psycopg2-binary"] +postgresql-psycopg2cffi = ["psycopg2cffi"] +postgresql-psycopgbinary = ["psycopg[binary] (>=3.0.7)"] +pymysql = ["pymysql"] +sqlcipher = ["sqlcipher3_binary"] + [[package]] name = "tiktoken" version = "0.12.0" @@ 
-3812,6 +4187,24 @@ notebook = ["ipywidgets (>=6)"] slack = ["slack-sdk"] telegram = ["requests"] +[[package]] +name = "typer" +version = "0.24.0" +description = "Typer, build great CLIs. Easy to code. Based on Python type hints." +optional = false +python-versions = ">=3.10" +groups = ["main"] +files = [ + {file = "typer-0.24.0-py3-none-any.whl", hash = "sha256:5fc435a9c8356f6160ed6e85a6301fdd6e3d8b2851da502050d1f92c5e9eddc8"}, + {file = "typer-0.24.0.tar.gz", hash = "sha256:f9373dc4eff901350694f519f783c29b6d7a110fc0dcc11b1d7e353b85ca6504"}, +] + +[package.dependencies] +annotated-doc = ">=0.0.2" +click = ">=8.2.1" +rich = ">=12.3.0" +shellingham = ">=1.3.0" + [[package]] name = "typer-slim" version = "0.21.1" @@ -4158,4 +4551,4 @@ type = ["pytest-mypy"] [metadata] lock-version = "2.1" python-versions = ">=3.12,<4.0" -content-hash = "9fa07aa9df83b58cdb2e3536b99565344cd7571b6ec718e14fac72a8ffff9699" +content-hash = "bf1d1cc34976102fd5fd353b1c966aefadf304a2e1562bc3d309585bc9de8ae4" diff --git a/pyproject.toml b/pyproject.toml index a3612f5..85d96b9 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -1,25 +1,30 @@ -[project] -name = "analisistrading" +[tool.poetry] +name = "trading-analyst" version = "0.1.0" -description = "" -authors = [ - {name = "Jose Luis Guerra Infante"} -] +description = "A sophisticated trading analysis tool." 
+authors = ["Jose Luis Guerra Infante"] license = "MIT" readme = "README.md" -requires-python = ">=3.12,<4.0" -dependencies = [ - "yfinance (>=1.1.0,<2.0.0)", - "pydantic (>=2.12.5,<3.0.0)", - "python-dotenv (>=1.2.1,<2.0.0)", - "litellm[google] (>=1.81.9,<2.0.0)", - "pandas (>=3.0.0,<4.0.0)", - "numpy (>=2.4.2,<3.0.0)", - "ddgs (>=9.10.0,<10.0.0)", - "feedparser (>=6.0.12,<7.0.0)" -] +packages = [{include = "trading_analysis"}] +[tool.poetry.dependencies] +python = ">=3.12,<4.0" +yfinance = ">=1.1.0,<2.0.0" +pydantic = ">=2.12.5,<3.0.0" +python-dotenv = ">=1.2.1,<2.0.0" +litellm = {extras = ["google"], version = ">=1.81.9,<2.0.0"} +pandas = ">=3.0.0,<4.0.0" +numpy = ">=2.4.2,<3.0.0" +ddgs = ">=9.10.0,<10.0.0" +feedparser = ">=6.0.12,<7.0.0" +rich = ">=13.9.4,<14.0.0" +typer = ">=0.15.1,<1.0.0" +sqlalchemy = "^2.0.46" +psycopg2-binary = "^2.9.11" + +[tool.poetry.group.dev.dependencies] +pytest = ">=8.0.0" [build-system] -requires = ["poetry-core>=2.0.0,<3.0.0"] +requires = ["poetry-core>=1.0.0"] build-backend = "poetry.core.masonry.api" diff --git a/tests/test_news.py b/tests/test_news.py new file mode 100644 index 0000000..99f0d19 --- /dev/null +++ b/tests/test_news.py @@ -0,0 +1,91 @@ +import pytest +from datetime import datetime, timedelta +from trading_analysis.domain.models import NewsItem, NewsType, Sentiment, RunnerItem +from trading_analysis.infrastructure.persistence import SQLAlchemyNewsRepository +from trading_analysis.application.news_service import DefaultNewsService +from trading_analysis.infrastructure.engines.runners_scoring import MomentumScoringEngine + +@pytest.fixture +def news_repo(): + # Use in-memory SQLite for testing + return SQLAlchemyNewsRepository("sqlite:///:memory:") + +@pytest.fixture +def news_service(news_repo): + return DefaultNewsService(news_repo) + +def test_news_crud(news_service): + # Add + news = news_service.add_news( + symbol="TSLA", + title="Earnings Beat", + publisher="Reuters", + news_type="earnings", + 
sentiment="positive", + catalyst_strength=5 + ) + assert news.id is not None + assert news.symbol == "TSLA" + assert news.news_type == NewsType.EARNINGS + assert news.sentiment == Sentiment.POSITIVE + + # List + items = news_service.list_news(symbol="TSLA") + assert len(items) == 1 + assert items[0].title == "Earnings Beat" + + # Update + updated = news_service.update_news(news.id, title="Massive Earnings Beat", catalyst_strength=4) + assert updated.title == "Massive Earnings Beat" + assert updated.catalyst_strength == 4 + + # Search + search_results = news_service.search_news("Massive") + assert len(search_results) == 1 + + # Delete (soft) + news_service.delete_news(news.id) + items_after_del = news_service.list_news(symbol="TSLA") + assert len(items_after_del) == 0 + +def test_news_linking(news_service): + news = news_service.add_news("AAPL", "New iPhone", "Apple", "other", "positive", 3) + runner_id = "20250218-AAPL" + + link = news_service.link_news_to_runner(runner_id, news.id) + assert link.runner_id == runner_id + assert link.news_id == news.id + + # Get news for runner + news_for_runner = news_service.repository.get_news_for_runner(runner_id) + assert len(news_for_runner) == 1 + assert news_for_runner[0].title == "New iPhone" + + # Unlink + news_service.unlink_news_from_runner(runner_id, news.id) + news_after_unlink = news_service.repository.get_news_for_runner(runner_id) + assert len(news_after_unlink) == 0 + +def test_catalyst_scoring(news_repo): + scoring_engine = MomentumScoringEngine(news_repository=news_repo) + + # Item with no news + item = RunnerItem( + ticker="AMD", price=100.0, change=5.0, pct_change=5.0, volume=1000000, + relative_volume=2.0, range_expansion=1.5, volatility=0.02, volume_acceleration=1.2 + ) + + breakdown_no_news = scoring_engine.get_breakdown(item) + assert breakdown_no_news.catalyst_score == 0.0 + + # Add positive earnings news + news_repo.add(NewsItem( + symbol="AMD", title="Earnings Beat", publisher="AMD", + 
news_type=NewsType.EARNINGS, sentiment=Sentiment.POSITIVE, + catalyst_strength=5, provider_publish_time=datetime.now() + )) + + breakdown_with_news = scoring_engine.get_breakdown(item) + # Catalyst score: strength 5 (50) * positive (1.5) * earnings (1.5) = 112.5 (capped at 100) + assert breakdown_with_news.catalyst_score == 100.0 + assert breakdown_with_news.total_score > breakdown_no_news.total_score diff --git a/tests/test_runners_scoring.py b/tests/test_runners_scoring.py new file mode 100644 index 0000000..268d0ba --- /dev/null +++ b/tests/test_runners_scoring.py @@ -0,0 +1,62 @@ +import pytest +from trading_analysis.domain.models import RunnerItem +from trading_analysis.infrastructure.engines.runners_scoring import MomentumScoringEngine + +def test_scoring_logic(): + engine = MomentumScoringEngine() + + # Strong Runner + strong_item = RunnerItem( + ticker="STRG", + price=100.0, + change=15.0, + pct_change=15.0, + volume=1000000, + relative_volume=10.0, + range_expansion=3.0, + volatility=0.2, + volume_acceleration=5.0 + ) + + score = engine.compute_score(strong_item) + assert score > 50 + + # Weak Runner + weak_item = RunnerItem( + ticker="WEAK", + price=10.0, + change=0.1, + pct_change=1.0, + volume=100000, + relative_volume=0.5, + range_expansion=0.5, + volatility=0.01, + volume_acceleration=0.5 + ) + + weak_score = engine.compute_score(weak_item) + assert weak_score < score + +def test_breakdown(): + engine = MomentumScoringEngine() + item = RunnerItem( + ticker="TEST", + price=10.0, + change=0.5, + pct_change=5.0, + volume=100000, + relative_volume=2.0, + range_expansion=1.2, + volatility=0.02, + volume_acceleration=1.5 + ) + + breakdown = engine.get_breakdown(item) + assert breakdown.momentum_score > 0 + assert breakdown.liquidity_score > 0 + assert abs(breakdown.total_score - ( + breakdown.momentum_score * 0.4 + + breakdown.liquidity_score * 0.3 + + breakdown.volatility_score * 0.15 + + breakdown.acceleration_score * 0.15 + )) < 0.001 diff --git 
a/trading_analysis/application/analyzer.py b/trading_analysis/application/analyzer.py index 6178abb..651d068 100644 --- a/trading_analysis/application/analyzer.py +++ b/trading_analysis/application/analyzer.py @@ -1,6 +1,6 @@ from ..domain.interfaces import ( DataProvider, LLMService, RiskEngine, RelativeStrengthEngine, - MonteCarloEngine, MarketRegimeEngine, PortfolioImpactEngine + MonteCarloEngine, MarketRegimeEngine, PortfolioImpactEngine, NewsService ) from ..domain.models import ComprehensiveAnalysis, FinalDecision, FinalRecommendation from typing import Dict, Optional @@ -10,6 +10,7 @@ def __init__( self, data_provider: DataProvider, llm_service: LLMService, + news_service: Optional[NewsService] = None, risk_engine: Optional[RiskEngine] = None, rs_engine: Optional[RelativeStrengthEngine] = None, mc_engine: Optional[MonteCarloEngine] = None, @@ -18,17 +19,27 @@ def __init__( ): self.data_provider = data_provider self.llm_service = llm_service + self.news_service = news_service self.risk_engine = risk_engine self.rs_engine = rs_engine self.mc_engine = mc_engine self.regime_engine = regime_engine self.portfolio_engine = portfolio_engine - def run_analysis(self, ticker: str, portfolio: Optional[Dict[str, float]] = None) -> ComprehensiveAnalysis: + def run_analysis(self, ticker: str, portfolio: Optional[Dict[str, float]] = None, catalyst_strength: float = 1) -> ComprehensiveAnalysis: # 1. 
Fetch Basic Data financials = self.data_provider.get_financials(ticker) technical = self.data_provider.get_technical_data(ticker) - news = self.data_provider.get_news(ticker) + + # Ingest and get structured news if service is available + if self.news_service: + # This fetches from GNews RSS and categorizes new items via LLM + self.news_service.auto_ingest_news(ticker, catalyst_strength) + # Fetch the most recent news for this symbol from our repository + news = self.news_service.list_news(symbol=ticker) + else: + # Fallback to the provider's un-categorized news + news = self.data_provider.get_news(ticker) # 2. Run Quantitative Engines risk_metrics = self.risk_engine.compute_metrics(ticker) if self.risk_engine else None @@ -50,7 +61,7 @@ def run_analysis(self, ticker: str, portfolio: Optional[Dict[str, float]] = None "health_summary": health_summary, "market_value_analysis": market_value_analysis, "sentiment_news_analysis": sentiment_news_analysis, - "recent_news": [f"[{n.publisher}] {n.title} (Source: {n.link})" for n in news[:20]], + "recent_news": [f"[{n.publisher}] {n.title} (Source: {n.link})" for n in news[:100]], "risk_metrics": risk_metrics.model_dump() if risk_metrics else {}, "relative_strength": rel_strength.model_dump() if rel_strength else {}, "monte_carlo": monte_carlo.model_dump() if monte_carlo else {}, diff --git a/trading_analysis/application/exporter.py b/trading_analysis/application/exporter.py new file mode 100644 index 0000000..b99dcfd --- /dev/null +++ b/trading_analysis/application/exporter.py @@ -0,0 +1,56 @@ +import json +import csv +import os +from datetime import datetime +from typing import List, Optional +from ..domain.models import ComprehensiveAnalysis, ExportData + +class DataExporter: + @staticmethod + def to_json(analysis: ComprehensiveAnalysis, filename: str = "analysis_export.json"): + export_data = ExportData( + timestamp=datetime.now(), + ticker=analysis.ticker, + signal=analysis.final_decision.recommendation, + 
conviction=analysis.final_decision.conviction_score, + key_metrics={ + "volatility": analysis.risk_metrics.volatility_1y if analysis.risk_metrics else 0, + "beta": analysis.risk_metrics.beta if analysis.risk_metrics else 1.0, + "sharpe": analysis.risk_metrics.sharpe_ratio if analysis.risk_metrics else 0, + "regime": analysis.market_regime.regime_type if analysis.market_regime else "Unknown", + "suggested_size": analysis.final_decision.position_size_suggestion + } + ) + + # We can append to a list or overwrite. Let's append to a list if exists. + data_list = [] + if os.path.exists(filename): + try: + with open(filename, 'r') as f: + data_list = json.load(f) + if not isinstance(data_list, list): + data_list = [data_list] + except Exception: + data_list = [] + + data_list.append(export_data.model_dump(mode='json')) + + with open(filename, 'w') as f: + json.dump(data_list, f, indent=4) + + @staticmethod + def to_csv(analysis: ComprehensiveAnalysis, filename: str = "analysis_export.csv"): + file_exists = os.path.exists(filename) + with open(filename, 'a', newline='') as f: + writer = csv.writer(f) + if not file_exists: + writer.writerow(["Timestamp", "Ticker", "Signal", "Conviction", "Volatility", "Regime"]) + + writer.writerow([ + datetime.now().isoformat(), + analysis.ticker, + analysis.final_decision.recommendation, + analysis.final_decision.conviction_score, + analysis.risk_metrics.volatility_1y if analysis.risk_metrics else "N/A", + analysis.market_regime.regime_type if analysis.market_regime else "Unknown" + ]) diff --git a/trading_analysis/application/news_service.py b/trading_analysis/application/news_service.py new file mode 100644 index 0000000..9bddc07 --- /dev/null +++ b/trading_analysis/application/news_service.py @@ -0,0 +1,122 @@ +from typing import List, Optional +from datetime import datetime, timedelta +from ..domain.interfaces import NewsService, NewsRepository, LLMService +from ..domain.models import NewsItem, NewsType, Sentiment, RunnerNewsLink + 
+class DefaultNewsService(NewsService): + def __init__(self, repository: NewsRepository, llm_service: Optional[LLMService] = None): + self.repository = repository + self.llm_service = llm_service + from ..infrastructure.news_gnews import GoogleNewsRSSProvider + self.news_provider = GoogleNewsRSSProvider() + + def add_news(self, symbol: str, title: str, publisher: str, news_type: str, + sentiment: str, catalyst_strength: int, content: Optional[str] = None, + link: Optional[str] = None, publish_time: Optional[datetime] = None) -> NewsItem: + + # Validation + symbol = symbol.upper() + + # Normalize enums + try: + nt = NewsType(news_type.lower()) + except ValueError: + nt = NewsType.OTHER + + try: + st = Sentiment(sentiment.lower()) + except ValueError: + st = Sentiment.NEUTRAL + + news = NewsItem( + symbol=symbol, + title=title, + content=content, + publisher=publisher, + link=link, + news_type=nt, + sentiment=st, + catalyst_strength=catalyst_strength, + provider_publish_time=publish_time or datetime.now() + ) + + return self.repository.add(news) + + def update_news(self, news_id: int, title: Optional[str] = None, + content: Optional[str] = None, sentiment: Optional[str] = None, + catalyst_strength: Optional[int] = None) -> NewsItem: + + updates = {} + if title: updates['title'] = title + if content: updates['content'] = content + if sentiment: + try: + updates['sentiment'] = Sentiment(sentiment.lower()) + except ValueError: + pass + if catalyst_strength is not None: + updates['catalyst_strength'] = catalyst_strength + + return self.repository.update(news_id, updates) + + def delete_news(self, news_id: int, force: bool = False): + self.repository.delete(news_id, soft=not force) + + def list_news(self, symbol: Optional[str] = None) -> List[NewsItem]: + return self.repository.list(symbol=symbol, limit=100) + + def search_news(self, keyword: str) -> List[NewsItem]: + return self.repository.search(keyword) + + def link_news_to_runner(self, runner_id: str, news_id: int) -> 
RunnerNewsLink: + return self.repository.link_to_runner(runner_id, news_id) + + def unlink_news_from_runner(self, runner_id: str, news_id: int): + self.repository.unlink_from_runner(runner_id, news_id) + + def auto_ingest_news(self, symbol: str, catalyst_strength: float = 1) -> List[NewsItem]: + symbol = symbol.upper() + # 1. Fetch news from GNews + raw_news = self.news_provider.get_news(symbol) + + # 2. Filter out already existing news in repository (by title and symbol) + existing_news = self.repository.list(symbol=symbol, limit=100) + existing_titles = {n.title.strip().lower() for n in existing_news} + + new_items = [n for n in raw_news if n.title.strip().lower() not in existing_titles] + + if not new_items: + return [] + + # 3. Categorize new items using LLM if available + if self.llm_service: + processed_items = self.llm_service.categorize_news(new_items) + + # 4. Filter by importance and recency + # Important (strength >= 4) or recent (< 6 months) + six_months_ago = datetime.now() - timedelta(days=180) + + # 5. 
Save to repository + saved_items = [] + for item in processed_items: + is_important = item.catalyst_strength >= catalyst_strength + is_recent = item.provider_publish_time >= six_months_ago + + if not (is_important or is_recent): + continue + + # Double check uniqueness before adding, in case LLM took time + # and another process added it + item.symbol = symbol + saved_items.append(self.repository.add(item)) + return saved_items + else: + # If no LLM, just save with defaults (applying same recency filter if possible) + six_months_ago = datetime.now() - timedelta(days=180) + saved_items = [] + for item in new_items: + if item.provider_publish_time < six_months_ago: + continue + item.symbol = symbol + saved_items.append(self.repository.add(item)) + return saved_items diff --git a/trading_analysis/application/runners_service.py b/trading_analysis/application/runners_service.py new file mode 100644 index 0000000..4f022bb --- /dev/null +++ b/trading_analysis/application/runners_service.py @@ -0,0 +1,75 @@ +import asyncio +from typing import List, Optional, Callable +from datetime import datetime +from ..domain.interfaces import RunnersProvider, ScoringEngine, RunnersService, RunnersPersistence +from ..domain.models import RunnerItem, RunnerSnapshot, ScoreBreakdown + +class DefaultRunnersService(RunnersService): + def __init__( + self, + provider: RunnersProvider, + scoring_engine: ScoringEngine, + persistence: Optional[RunnersPersistence] = None + ): + self.provider = provider + self.scoring_engine = scoring_engine + self.persistence = persistence + + def get_runners(self, mode: str = "live", top_n: int = 10) -> RunnerSnapshot: + # 1. Get candidate tickers + candidates = self.provider.get_top_movers() + + # 2. Pull market snapshot for candidates + items = self.provider.get_market_snapshot(candidates) + + # 3. Apply universe filters (e.g. 
Price > 1) + # In a real app, this would be more configurable + filtered_items = [ + item for item in items + if item.price >= 1.0 and item.volume >= 500000 + ] + + # 4. Compute scores and classify + for item in filtered_items: + item.score = self.scoring_engine.compute_score(item) + + if item.score >= 70: + item.classification = "Strong Runner" + elif item.score >= 40: + item.classification = "Developing Runner" + else: + item.classification = "Ignore" + + # 5. Sort by score + sorted_runners = sorted(filtered_items, key=lambda x: x.score, reverse=True) + + snapshot = RunnerSnapshot( + timestamp=datetime.now(), + runners=sorted_runners[:top_n], + universe_size=len(filtered_items) + ) + + # 6. Persist results + if self.persistence: + try: + self.persistence.save_snapshot(snapshot) + except Exception: + # Log error but don't fail the request + pass + + return snapshot + + async def run_live_scan(self, interval: int = 60, callback: Optional[Callable] = None): + """Continuously scans for runners and persists/notifies.""" + while True: + try: + snapshot = self.get_runners(mode="live") + if callback: + callback(snapshot) + await asyncio.sleep(interval) + except asyncio.CancelledError: + break + except Exception as e: + # In a real app, use structured logging + print(f"Error in live scan: {e}") + await asyncio.sleep(interval) diff --git a/trading_analysis/domain/interfaces.py b/trading_analysis/domain/interfaces.py index 92aeacc..2836275 100644 --- a/trading_analysis/domain/interfaces.py +++ b/trading_analysis/domain/interfaces.py @@ -1,11 +1,12 @@ from abc import ABC, abstractmethod -from typing import List +from typing import List, Dict, Optional +from datetime import datetime from .models import ( CompanyFinancials, TechnicalLevels, NewsItem, FinalRecommendation, AnalysisSummary, RiskMetrics, RelativeStrengthReport, MonteCarloForecast, - MarketRegime, PortfolioImpactReport, FinalDecision, JudgeOpinion + MarketRegime, PortfolioImpactReport, FinalDecision, JudgeOpinion, 
+ RunnerItem, ScoreBreakdown, RunnerSnapshot, RunnerNewsLink ) -from typing import List, Dict class DataProvider(ABC): @abstractmethod @@ -20,6 +21,44 @@ def get_technical_data(self, ticker: str) -> TechnicalLevels: def get_news(self, ticker: str) -> List[NewsItem]: pass +class RunnersProvider(ABC): + @abstractmethod + def get_market_snapshot(self, tickers: List[str]) -> List[RunnerItem]: + pass + + @abstractmethod + def get_top_movers(self) -> List[str]: + """Returns a list of candidate tickers that are moving.""" + pass + +class ScoringEngine(ABC): + @abstractmethod + def compute_score(self, item: RunnerItem) -> float: + pass + + @abstractmethod + def get_breakdown(self, item: RunnerItem) -> ScoreBreakdown: + pass + +class RunnersService(ABC): + @abstractmethod + def get_runners(self, mode: str = "live", top_n: int = 10) -> RunnerSnapshot: + pass + +class RunnersPersistence(ABC): + @abstractmethod + def save_snapshot(self, snapshot: RunnerSnapshot): + pass + + @abstractmethod + def get_history(self, ticker: Optional[str] = None, start_date: Optional[datetime] = None) -> List[RunnerItem]: + pass + +class BacktestEngine(ABC): + @abstractmethod + def run_backtest(self, tickers: List[str], date: datetime) -> dict: + pass + class RiskEngine(ABC): @abstractmethod def compute_metrics(self, ticker: str) -> RiskMetrics: @@ -45,6 +84,85 @@ class PortfolioImpactEngine(ABC): def analyze_impact(self, ticker: str, portfolio: Dict[str, float]) -> PortfolioImpactReport: pass +class NewsRepository(ABC): + @abstractmethod + def add(self, news: NewsItem) -> NewsItem: + pass + + @abstractmethod + def update(self, news_id: int, updates: dict) -> NewsItem: + pass + + @abstractmethod + def delete(self, news_id: int, soft: bool = True): + pass + + @abstractmethod + def get_by_id(self, news_id: int) -> Optional[NewsItem]: + pass + + @abstractmethod + def list(self, symbol: Optional[str] = None, news_type: Optional[str] = None, + sentiment: Optional[str] = None, limit: int = 50) -> 
List[NewsItem]: + pass + + @abstractmethod + def search(self, keyword: str) -> List[NewsItem]: + pass + + @abstractmethod + def link_to_runner(self, runner_id: str, news_id: int) -> RunnerNewsLink: + pass + + @abstractmethod + def unlink_from_runner(self, runner_id: str, news_id: int): + pass + + @abstractmethod + def get_links_for_runner(self, runner_id: str) -> List[RunnerNewsLink]: + pass + + @abstractmethod + def get_news_for_runner(self, runner_id: str) -> List[NewsItem]: + pass + +class NewsService(ABC): + @abstractmethod + def add_news(self, symbol: str, title: str, publisher: str, news_type: str, + sentiment: str, catalyst_strength: int, content: Optional[str] = None, + link: Optional[str] = None, publish_time: Optional[datetime] = None) -> NewsItem: + pass + + @abstractmethod + def update_news(self, news_id: int, title: Optional[str] = None, + content: Optional[str] = None, sentiment: Optional[str] = None, + catalyst_strength: Optional[int] = None) -> NewsItem: + pass + + @abstractmethod + def delete_news(self, news_id: int, force: bool = False): + pass + + @abstractmethod + def list_news(self, symbol: Optional[str] = None) -> List[NewsItem]: + pass + + @abstractmethod + def search_news(self, keyword: str) -> List[NewsItem]: + pass + + @abstractmethod + def link_news_to_runner(self, runner_id: str, news_id: int) -> RunnerNewsLink: + pass + + @abstractmethod + def unlink_news_from_runner(self, runner_id: str, news_id: int): + pass + + @abstractmethod + def auto_ingest_news(self, symbol: str, catalyst_strength: float) -> List[NewsItem]: + pass + class LLMService(ABC): @abstractmethod def analyze_health(self, financials: CompanyFinancials) -> str: @@ -77,3 +195,7 @@ def resolve_final_decision(self, opinions: List[JudgeOpinion], data: dict) -> Fi @abstractmethod def get_judge_opinions(self, data: dict) -> FinalRecommendation: pass + + @abstractmethod + def categorize_news(self, news: List[NewsItem]) -> List[NewsItem]: + pass diff --git 
a/trading_analysis/domain/models.py b/trading_analysis/domain/models.py index c0b6de8..3db2e06 100644 --- a/trading_analysis/domain/models.py +++ b/trading_analysis/domain/models.py @@ -25,12 +25,52 @@ class SentimentResult(BaseModel): label: str # Good/Bad or Bullish/Bearish summary: str +from enum import Enum + +class NewsType(str, Enum): + EARNINGS = "earnings" + GUIDANCE = "guidance" + FDA = "FDA" + MERGER = "merger" + ACQUISITION = "acquisition" + OFFERING = "offering" + DILUTION = "dilution" + CONTRACT = "contract" + ANALYST_UPGRADE = "analyst_upgrade" + ANALYST_DOWNGRADE = "analyst_downgrade" + MACRO = "macro" + OTHER = "other" + +class Sentiment(str, Enum): + POSITIVE = "positive" + NEUTRAL = "neutral" + NEGATIVE = "negative" + class NewsItem(BaseModel): + id: Optional[int] = None + symbol: str title: str + content: Optional[str] = None publisher: str - link: str + link: Optional[str] = None + news_type: NewsType = NewsType.OTHER + sentiment: Sentiment = Sentiment.NEUTRAL + catalyst_strength: int = Field(default=1, ge=1, le=5) provider_publish_time: datetime - summary: Optional[str] = None + created_at: datetime = Field(default_factory=datetime.now) + updated_at: datetime = Field(default_factory=datetime.now) + is_active: bool = True + + @field_validator('symbol') + @classmethod + def normalize_symbol(cls, v: str) -> str: + return v.upper() + +class RunnerNewsLink(BaseModel): + id: Optional[int] = None + runner_id: str + news_id: int + linked_at: datetime = Field(default_factory=datetime.now) class AnalysisSummary(BaseModel): health_summary: str @@ -194,12 +234,57 @@ def normalize_decision(cls, data: Any) -> Any: class ComprehensiveAnalysis(BaseModel): ticker: str - risk_metrics: RiskMetrics - relative_strength: RelativeStrengthReport - monte_carlo: MonteCarloForecast - market_regime: MarketRegime - portfolio_impact: PortfolioImpactReport + risk_metrics: Optional[RiskMetrics] = None + relative_strength: Optional[RelativeStrengthReport] = None + monte_carlo: 
Optional[MonteCarloForecast] = None + market_regime: Optional[MarketRegime] = None + portfolio_impact: Optional[PortfolioImpactReport] = None trader_opinion: JudgeOpinion analyst_opinion: JudgeOpinion risk_pro_opinion: JudgeOpinion final_decision: FinalDecision + +class RunnerItem(BaseModel): + ticker: str + price: float + change: float + pct_change: float + volume: int + relative_volume: Optional[float] = None + gap_pct: Optional[float] = None + vwap_dist: Optional[float] = None + atr: Optional[float] = None + volatility: Optional[float] = None + range_expansion: Optional[float] = None + volume_acceleration: Optional[float] = None + market_cap: Optional[float] = None + float_size: Optional[float] = None + score: float = 0.0 + classification: str = "Ignore" # Strong Runner, Developing Runner, Ignore + timestamp: datetime = Field(default_factory=datetime.now) + +class ScoreBreakdown(BaseModel): + momentum_score: float + liquidity_score: float + volatility_score: float + acceleration_score: float + catalyst_score: float = 0.0 + total_score: float + +class RunnerSnapshot(BaseModel): + timestamp: datetime + runners: List[RunnerItem] + universe_size: int + +class ExportData(BaseModel): + timestamp: datetime + ticker: str + signal: str # Buy/Hold/Sell + conviction: float + key_metrics: dict + +class PortfolioItem(BaseModel): + ticker: str + weight: float + entry_price: Optional[float] = None + name: Optional[str] = None diff --git a/trading_analysis/infrastructure/engines/backtest_engine.py b/trading_analysis/infrastructure/engines/backtest_engine.py new file mode 100644 index 0000000..619a6cd --- /dev/null +++ b/trading_analysis/infrastructure/engines/backtest_engine.py @@ -0,0 +1,94 @@ +import yfinance as yf +import pandas as pd +import numpy as np +from typing import List, Optional +from datetime import datetime, timedelta +from ...domain.interfaces import BacktestEngine, NewsRepository + +class MomentumBacktestEngine(BacktestEngine): + def __init__(self, 
news_repository: Optional[NewsRepository] = None): + self.news_repository = news_repository + + def run_backtest(self, tickers: List[str], date: datetime) -> dict: + if not tickers: + return {"error": "No tickers provided"} + + start_date = date.strftime('%Y-%m-%d') + end_date = (date + timedelta(days=1)).strftime('%Y-%m-%d') + + try: + # Download intraday data for the backtest date + data = yf.download(tickers, start=start_date, end=end_date, interval="5m", progress=False, group_by='ticker') + + results = [] + for ticker in tickers: + try: + t_data = data[ticker] if len(tickers) > 1 else data + if t_data.empty: continue + + # Simulated entry at the end of the first hour (approx 10:30 AM) + # 9:30 to 10:30 is 12 bars of 5m + if len(t_data) < 13: continue + + entry_price = float(t_data['Close'].iloc[12]) + remaining_data = t_data['Close'].iloc[12:] + + max_price = float(remaining_data.max()) + min_price = float(remaining_data.min()) + final_price = float(remaining_data.iloc[-1]) + + extension = (max_price - entry_price) / entry_price + drawdown = (min_price - entry_price) / entry_price + return_pct = (final_price - entry_price) / entry_price + + results.append({ + "ticker": ticker, + "extension": extension, + "max_drawdown": drawdown, + "return": return_pct, + "success": extension > 0.02, # 2% move after detection + "has_catalyst": self._check_catalyst(ticker, date) + }) + except Exception: + continue + + if not results: + return {"error": "No valid data for backtest"} + + # Aggregate metrics + win_rate = sum(1 for r in results if r['return'] > 0) / len(results) + avg_return = sum(r['return'] for r in results) / len(results) + avg_extension = sum(r['extension'] for r in results) / len(results) + + # Metrics with catalyst + with_cat = [r for r in results if r['has_catalyst']] + cat_metrics = None + if with_cat: + cat_metrics = { + "count": len(with_cat), + "win_rate": sum(1 for r in with_cat if r['return'] > 0) / len(with_cat), + "avg_return": sum(r['return'] for r 
in with_cat) / len(with_cat), + "avg_extension": sum(r['extension'] for r in with_cat) / len(with_cat) + } + + return { + "date": start_date, + "tickers_count": len(results), + "win_rate": win_rate, + "avg_return": avg_return, + "avg_extension": avg_extension, + "catalyst_metrics": cat_metrics, + "details": results + } + except Exception as e: + return {"error": str(e)} + + def _check_catalyst(self, ticker: str, date: datetime) -> bool: + if not self.news_repository: + return False + # Simplified: Check if any news existed for this symbol before or on the date + news = self.news_repository.list(symbol=ticker, limit=10) + # Filters news that happened within 48h before the backtest date + catalysts = [n for n in news if 0 <= (date - n.provider_publish_time).total_seconds() <= 172800] + return len(catalysts) > 0 + diff --git a/trading_analysis/infrastructure/engines/runners_scoring.py b/trading_analysis/infrastructure/engines/runners_scoring.py new file mode 100644 index 0000000..595f644 --- /dev/null +++ b/trading_analysis/infrastructure/engines/runners_scoring.py @@ -0,0 +1,73 @@ +from typing import Optional +from ...domain.interfaces import ScoringEngine, NewsRepository +from ...domain.models import RunnerItem, ScoreBreakdown, Sentiment, NewsType + +class MomentumScoringEngine(ScoringEngine): + def __init__(self, weights=None, news_repository: Optional[NewsRepository] = None): + self.weights = weights or { + "momentum": 0.4, + "liquidity": 0.2, + "volatility": 0.1, + "acceleration": 0.1, + "catalyst": 0.2 + } + self.news_repository = news_repository + + def compute_score(self, item: RunnerItem) -> float: + breakdown = self.get_breakdown(item) + return breakdown.total_score + + def get_breakdown(self, item: RunnerItem) -> ScoreBreakdown: + # 1. Momentum Score (0-100) + # Based on pct change (max at 15%) and range expansion + m_score = min(100, (item.pct_change * 5) + (item.range_expansion * 10)) + + # 2. 
Liquidity Score (0-100) + # Based on relative volume (high rel vol is good) + l_score = min(100, item.relative_volume * 10) + + # 3. Volatility Score (0-100) + # High intraday volatility relative to normal + v_score = min(100, item.volatility * 100) # Simplified + + # 4. Acceleration Score (0-100) + a_score = min(100, item.volume_acceleration * 20) + + # 5. Catalyst Score (0-100) + c_score = 0.0 + if self.news_repository: + # Check for news in the last 24 hours + news_items = self.news_repository.list(symbol=item.ticker, limit=5) + if news_items: + # Use the most recent news for scoring + latest = news_items[0] + base_c = latest.catalyst_strength * 10 # 10 to 50 + + sentiment_multiplier = 1.0 + if latest.sentiment == Sentiment.POSITIVE: + sentiment_multiplier = 1.5 + elif latest.sentiment == Sentiment.NEGATIVE: + sentiment_multiplier = 0.5 + + type_multiplier = 1.0 + if latest.news_type in [NewsType.EARNINGS, NewsType.FDA, NewsType.GUIDANCE]: + type_multiplier = 1.5 + + c_score = min(100, base_c * sentiment_multiplier * type_multiplier) + + total = ( + m_score * self.weights["momentum"] + + l_score * self.weights["liquidity"] + + v_score * self.weights["volatility"] + + a_score * self.weights["acceleration"] + + c_score * self.weights["catalyst"] + ) + + return ScoreBreakdown( + momentum_score=m_score, + liquidity_score=l_score, + volatility_score=v_score, + acceleration_score=a_score, + catalyst_score=c_score, + total_score=total + ) diff --git a/trading_analysis/infrastructure/llm_service.py b/trading_analysis/infrastructure/llm_service.py index 553476c..df152f4 100644 --- a/trading_analysis/infrastructure/llm_service.py +++ b/trading_analysis/infrastructure/llm_service.py @@ -6,7 +6,7 @@ from ..domain.models import ( CompanyFinancials, NewsItem, FinalRecommendation, JudgeOpinion, FinalDecision, RiskMetrics, RelativeStrengthReport, MonteCarloForecast, - MarketRegime, PortfolioImpactReport + MarketRegime, PortfolioImpactReport, NewsType, Sentiment ) class 
LiteLLMService(LLMService): @@ -149,7 +149,7 @@ def analyze_market_value(self, financials: CompanyFinancials, current_price: flo return self._get_content(response) def analyze_sentiment(self, news: List[NewsItem]) -> str: - news_details = "\n".join([f"- [{item.publisher}] {item.title} (Source: {item.link})" for item in news[:20]]) + news_details = "\n".join([f"- [{item.publisher}] {item.title} (Source: {item.link})" for item in news[:100]]) prompt = f""" Based on the following news and social media titles from various sources (Reuters, WSJ, Bloomberg, MarketWatch, and X), analyze the social sentiment and key data for the stock. @@ -297,6 +297,95 @@ def resolve_final_decision(self, opinions: List[JudgeOpinion], data: dict) -> Fi ) return FinalDecision(**self._parse_json_response(response)) + def categorize_news(self, news: List[NewsItem]) -> List[NewsItem]: + if not news: + return [] + + news_to_process = news + news_input = [] + for i, n in enumerate(news_to_process): + news_input.append({ + "id": i, + "title": n.title, + "content": (n.content or "")[:500] # Limit content length + }) + + prompt = f""" + You are a Financial News Classifier. Your task is to categorize the following news items into specific types, sentiments, and catalyst strengths. + + ALLOWED NEWS TYPES: + earnings, guidance, FDA, merger, acquisition, offering, dilution, contract, analyst_upgrade, analyst_downgrade, macro, other + + ALLOWED SENTIMENTS: + positive, neutral, negative + + CATALYST STRENGTH: + Integer from 1 (minor) to 5 (major impact). + + INPUT NEWS: + {json.dumps(news_input, indent=2)} + + Respond ONLY in JSON format as a list of objects with "id", "news_type", "sentiment", and "catalyst_strength". + + Example Response: + [ + {{"id": 0, "news_type": "earnings", "sentiment": "positive", "catalyst_strength": 4}}, + ... 
+ ] + """ + + response = self._get_completion( + messages=[{"role": "user", "content": prompt}], + response_format={ "type": "json_object" } + ) + + results = self._parse_json_response(response) + + # results might be a list or a dict containing a list + if isinstance(results, dict): + # Try to find the list in common keys + for key in ["results", "news", "items", "categories"]: + if key in results and isinstance(results[key], list): + results = results[key] + break + else: + # If still dict, look for ANY list + for val in results.values(): + if isinstance(val, list): + results = val + break + + if not isinstance(results, list): + return [] + + processed_news = [] + for res in results: + try: + idx = res.get("id") + if idx is not None and idx < len(news_to_process): + n = news_to_process[idx].model_copy() + + # Normalize type + nt_str = str(res.get("news_type", "other")).lower() + try: + n.news_type = NewsType(nt_str) + except ValueError: + n.news_type = NewsType.OTHER + + # Normalize sentiment + st_str = str(res.get("sentiment", "neutral")).lower() + try: + n.sentiment = Sentiment(st_str) + except ValueError: + n.sentiment = Sentiment.NEUTRAL + + n.catalyst_strength = int(res.get("catalyst_strength", 1)) + processed_news.append(n) + except Exception: + continue + + return processed_news + def get_judge_opinions(self, data: dict) -> FinalRecommendation: # Legacy method for backward compatibility if needed, but we should use the new ones trader = self.get_trader_opinion(data) diff --git a/trading_analysis/infrastructure/news_gnews.py b/trading_analysis/infrastructure/news_gnews.py index 2ee0760..ada1017 100644 --- a/trading_analysis/infrastructure/news_gnews.py +++ b/trading_analysis/infrastructure/news_gnews.py @@ -1,4 +1,5 @@ import urllib.parse +import re from typing import List from datetime import datetime import feedparser @@ -7,17 +8,49 @@ class GoogleNewsRSSProvider: """ Fetch news via Google News RSS for specific domains reliably, avoiding heavy scraping. 
+ It can also consume direct RSS feeds for specific providers when available. """ BASE_URL = "https://news.google.com/rss/search?q={query}&hl=en-US&gl=US&ceid=US:en" - def __init__(self, domains: List[str] | None = None): + DEFAULT_DIRECT_FEEDS = { + "reuters.com": "https://www.reuters.com/rssFeed/worldNews", + "wsj.com": "https://feeds.a.dj.com/rss/RSSMarketsMain.xml", + "ft.com": "https://www.ft.com/?format=rss", + "bloomberg.com": "https://www.bloomberg.com/feed/podcast/etf-report.xml", + "seekingalpha.com": "https://seekingalpha.com/market-news.xml", + } + + def __init__(self, domains: List[str] | None = None, direct_feeds: dict | None = None): self.domains = domains or [ "reuters.com", "wsj.com", "marketwatch.com", "bloomberg.com", + "ft.com", + "cnbc.com", + "finance.yahoo.com", + "barrons.com", + "seekingalpha.com", + "investing.com", + "thestreet.com", + "zacks.com", + "morningstar.com", + "bbc.com", + "aljazeera.com", + "politico.com", + "theguardian.com", + "asia.nikkei.com", + "techcrunch.com", + "theverge.com", + "wired.com", + "arstechnica.com", + "venturebeat.com", + "coindesk.com", + "cointelegraph.com", + "theblock.co" ] + self.direct_feeds = direct_feeds if direct_feeds is not None else self.DEFAULT_DIRECT_FEEDS def _build_url(self, ticker: str, domain: str) -> str: q = f"{ticker} stock site:{domain}" @@ -25,20 +58,70 @@ def _build_url(self, ticker: str, domain: str) -> str: def get_news(self, ticker: str) -> List[NewsItem]: items: List[NewsItem] = [] + ticker_lower = ticker.lower() + for domain in self.domains: - url = self._build_url(ticker, domain) - feed = feedparser.parse(url) - for entry in feed.entries[:6]: # take a few per domain - published = self._parse_published(entry) - items.append(NewsItem( - title=getattr(entry, 'title', ''), - publisher=domain_to_publisher(domain), - link=getattr(entry, 'link', ''), - provider_publish_time=published, - summary=getattr(entry, 'summary', None) - )) + # Check for direct feed + direct_url = 
self.direct_feeds.get(domain) + feed_items = [] + + if direct_url: + try: + feed = feedparser.parse(direct_url) + for entry in feed.entries[:100]: + title = getattr(entry, 'title', '') + summary = getattr(entry, 'summary', '') + # Content check (some feeds use content field) + content_list = getattr(entry, 'content', []) + content_val = content_list[0].get('value', '') if content_list else '' + + text_to_check = (title + " " + (summary or "") + " " + content_val).lower() + + # Use a slightly more robust check: whole word or followed by space/punctuation + # to avoid matching "T" in every word. + if self._is_ticker_in_text(ticker_lower, text_to_check): + published = self._parse_published(entry) + feed_items.append(NewsItem( + symbol=ticker, + title=title, + publisher=domain_to_publisher(domain), + link=getattr(entry, 'link', ''), + provider_publish_time=published, + content=summary or content_val + )) + except Exception: + # If direct feed fails, we'll let it fall through to Google News + pass + + # Fallback to Google News RSS search if no items found from direct feed + # OR if no direct feed exists for this domain + if not feed_items: + url = self._build_url(ticker, domain) + try: + feed = feedparser.parse(url) + for entry in feed.entries[:100]: + published = self._parse_published(entry) + feed_items.append(NewsItem( + symbol=ticker, + title=getattr(entry, 'title', ''), + publisher=domain_to_publisher(domain), + link=getattr(entry, 'link', ''), + provider_publish_time=published, + content=getattr(entry, 'summary', None) + )) + except Exception: + pass + + items.extend(feed_items) + return items + def _is_ticker_in_text(self, ticker_lower: str, text_lower: str) -> bool: + """Checks if ticker is in text, attempting to avoid partial word matches.""" + # Use word boundaries for all tickers to avoid partial matches (e.g., 'META' in 'metabolism') + pattern = r'\b' + re.escape(ticker_lower) + r'\b' + return bool(re.search(pattern, text_lower)) + def _parse_published(self, 
from typing import List, Optional
from datetime import datetime
from sqlalchemy import create_engine, Column, Integer, String, Float, DateTime, ForeignKey, Text, Boolean
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import sessionmaker, relationship
from ..domain.interfaces import RunnersPersistence, NewsRepository
from ..domain.models import RunnerItem, RunnerSnapshot, NewsItem, RunnerNewsLink

Base = declarative_base()

class RunnerSnapshotDB(Base):
    """One scanner run: a timestamped set of runner rows."""
    __tablename__ = 'runner_snapshots'
    id = Column(Integer, primary_key=True)
    timestamp = Column(DateTime, default=datetime.now)
    universe_size = Column(Integer)
    runners = relationship("RunnerItemDB", back_populates="snapshot")

class RunnerItemDB(Base):
    """A single ticker's metrics inside one snapshot."""
    __tablename__ = 'runner_items'
    id = Column(Integer, primary_key=True)
    snapshot_id = Column(Integer, ForeignKey('runner_snapshots.id'))
    # Indexed: get_history filters on both ticker and timestamp.
    ticker = Column(String(10), index=True)
    price = Column(Float)
    change = Column(Float)
    pct_change = Column(Float)
    volume = Column(Integer)
    relative_volume = Column(Float)
    gap_pct = Column(Float)
    vwap_dist = Column(Float)
    volatility = Column(Float)
    range_expansion = Column(Float)
    volume_acceleration = Column(Float)
    market_cap = Column(Float)
    float_size = Column(Float)
    score = Column(Float)
    classification = Column(String(50))
    timestamp = Column(DateTime, index=True)

    snapshot = relationship("RunnerSnapshotDB", back_populates="runners")

class NewsDB(Base):
    """Persisted news/catalyst row; symbol is always stored uppercased."""
    __tablename__ = 'news'
    id = Column(Integer, primary_key=True)
    symbol = Column(String(10), index=True)
    title = Column(String(255))
    content = Column(Text)
    publisher = Column(String(100))
    link = Column(String(500))
    news_type = Column(String(50), index=True)
    sentiment = Column(String(20), index=True)
    catalyst_strength = Column(Integer)
    provider_publish_time = Column(DateTime, index=True)
    created_at = Column(DateTime, default=datetime.now)
    updated_at = Column(DateTime, default=datetime.now, onupdate=datetime.now)
    is_active = Column(Boolean, default=True)

class RunnerNewsLinkDB(Base):
    """Many-to-many association between a runner id and a news row."""
    __tablename__ = 'runner_news_links'
    id = Column(Integer, primary_key=True)
    runner_id = Column(String(50), index=True)  # e.g. 20250218-TSLA
    news_id = Column(Integer, ForeignKey('news.id'), index=True)
    linked_at = Column(DateTime, default=datetime.now)

class SQLAlchemyRunnersPersistence(RunnersPersistence):
    """Stores runner snapshots and their items in a SQL database."""

    def __init__(self, database_url: str = "sqlite:///runners.db"):
        self.engine = create_engine(database_url)
        Base.metadata.create_all(self.engine)
        self.Session = sessionmaker(bind=self.engine)

    def save_snapshot(self, snapshot: RunnerSnapshot):
        """Persist a snapshot and all its runner items atomically."""
        session = self.Session()
        try:
            db_snapshot = RunnerSnapshotDB(
                timestamp=snapshot.timestamp,
                universe_size=snapshot.universe_size
            )
            session.add(db_snapshot)
            session.flush()  # assigns db_snapshot.id for the foreign keys

            for item in snapshot.runners:
                db_item = RunnerItemDB(
                    snapshot_id=db_snapshot.id,
                    ticker=item.ticker,
                    price=item.price,
                    change=item.change,
                    pct_change=item.pct_change,
                    volume=item.volume,
                    relative_volume=item.relative_volume,
                    gap_pct=item.gap_pct,
                    vwap_dist=item.vwap_dist,
                    volatility=item.volatility,
                    range_expansion=item.range_expansion,
                    volume_acceleration=item.volume_acceleration,
                    market_cap=item.market_cap,
                    float_size=item.float_size,
                    score=item.score,
                    classification=item.classification,
                    timestamp=item.timestamp
                )
                session.add(db_item)

            session.commit()
        except Exception:
            session.rollback()
            raise
        finally:
            session.close()

    def get_history(self, ticker: Optional[str] = None, start_date: Optional[datetime] = None) -> List[RunnerItem]:
        """Return stored runner rows, newest first, optionally filtered."""
        session = self.Session()
        try:
            query = session.query(RunnerItemDB)
            if ticker:
                query = query.filter(RunnerItemDB.ticker == ticker)
            if start_date:
                query = query.filter(RunnerItemDB.timestamp >= start_date)

            db_items = query.order_by(RunnerItemDB.timestamp.desc()).all()

            return [
                RunnerItem(
                    ticker=db_item.ticker,
                    price=db_item.price,
                    change=db_item.change,
                    pct_change=db_item.pct_change,
                    volume=db_item.volume,
                    relative_volume=db_item.relative_volume,
                    gap_pct=db_item.gap_pct,
                    vwap_dist=db_item.vwap_dist,
                    volatility=db_item.volatility,
                    range_expansion=db_item.range_expansion,
                    volume_acceleration=db_item.volume_acceleration,
                    market_cap=db_item.market_cap,
                    float_size=db_item.float_size,
                    score=db_item.score,
                    classification=db_item.classification,
                    timestamp=db_item.timestamp
                )
                for db_item in db_items
            ]
        finally:
            session.close()

class SQLAlchemyNewsRepository(NewsRepository):
    """CRUD store for news/catalysts plus runner-to-news link management."""

    def __init__(self, database_url: str = "sqlite:///runners.db"):
        self.engine = create_engine(database_url)
        Base.metadata.create_all(self.engine)
        self.Session = sessionmaker(bind=self.engine)

    def add(self, news: NewsItem) -> NewsItem:
        """Insert a news row; returns the input item with its new id set."""
        session = self.Session()
        try:
            db_news = NewsDB(
                # Uppercase on write: list() filters with symbol.upper(), so a
                # lowercase-stored row would otherwise never be found again.
                symbol=news.symbol.upper() if news.symbol else news.symbol,
                title=news.title,
                content=news.content,
                publisher=news.publisher,
                link=news.link,
                news_type=news.news_type.value if hasattr(news.news_type, 'value') else news.news_type,
                sentiment=news.sentiment.value if hasattr(news.sentiment, 'value') else news.sentiment,
                catalyst_strength=news.catalyst_strength,
                provider_publish_time=news.provider_publish_time,
                created_at=news.created_at,
                updated_at=news.updated_at,
                is_active=news.is_active
            )
            session.add(db_news)
            session.commit()
            session.refresh(db_news)
            news.id = db_news.id
            return news
        except Exception:
            session.rollback()
            raise
        finally:
            session.close()

    def update(self, news_id: int, updates: dict) -> NewsItem:
        """Apply a partial update; raises ValueError if the row is missing."""
        session = self.Session()
        try:
            db_news = session.query(NewsDB).filter(NewsDB.id == news_id).first()
            if not db_news:
                raise ValueError(f"News with id {news_id} not found")

            for key, value in updates.items():
                if hasattr(db_news, key):
                    if key in ['news_type', 'sentiment'] and hasattr(value, 'value'):
                        setattr(db_news, key, value.value)
                    elif key == 'symbol' and isinstance(value, str):
                        # Keep symbols uppercased, consistent with add()/list().
                        setattr(db_news, key, value.upper())
                    else:
                        setattr(db_news, key, value)

            db_news.updated_at = datetime.now()
            session.commit()
            session.refresh(db_news)
            return self._map_to_domain(db_news)
        except Exception:
            session.rollback()
            raise
        finally:
            session.close()

    def delete(self, news_id: int, soft: bool = True):
        """Soft-delete (deactivate) by default; hard-delete when soft=False."""
        session = self.Session()
        try:
            db_news = session.query(NewsDB).filter(NewsDB.id == news_id).first()
            if db_news:
                if soft:
                    db_news.is_active = False
                else:
                    session.delete(db_news)
                session.commit()
        except Exception:
            session.rollback()
            raise
        finally:
            session.close()

    def get_by_id(self, news_id: int) -> Optional[NewsItem]:
        session = self.Session()
        try:
            db_news = session.query(NewsDB).filter(NewsDB.id == news_id).first()
            if db_news:
                return self._map_to_domain(db_news)
            return None
        finally:
            session.close()

    def list(self, symbol: Optional[str] = None, news_type: Optional[str] = None,
             sentiment: Optional[str] = None, limit: int = 50) -> List[NewsItem]:
        """Return active news rows, newest first, with optional filters."""
        session = self.Session()
        try:
            query = session.query(NewsDB).filter(NewsDB.is_active == True)
            if symbol:
                query = query.filter(NewsDB.symbol == symbol.upper())
            if news_type:
                query = query.filter(NewsDB.news_type == news_type)
            if sentiment:
                query = query.filter(NewsDB.sentiment == sentiment)

            db_news_list = query.order_by(NewsDB.provider_publish_time.desc()).limit(limit).all()
            return [self._map_to_domain(db_news) for db_news in db_news_list]
        finally:
            session.close()

    def search(self, keyword: str) -> List[NewsItem]:
        """Case-insensitive keyword search over title and content."""
        session = self.Session()
        try:
            query = session.query(NewsDB).filter(
                NewsDB.is_active == True,
                (NewsDB.title.ilike(f"%{keyword}%") | NewsDB.content.ilike(f"%{keyword}%"))
            )
            db_news_list = query.order_by(NewsDB.provider_publish_time.desc()).all()
            return [self._map_to_domain(db_news) for db_news in db_news_list]
        finally:
            session.close()

    def link_to_runner(self, runner_id: str, news_id: int) -> RunnerNewsLink:
        session = self.Session()
        try:
            db_link = RunnerNewsLinkDB(runner_id=runner_id, news_id=news_id)
            session.add(db_link)
            session.commit()
            session.refresh(db_link)
            return RunnerNewsLink(
                id=db_link.id,
                runner_id=db_link.runner_id,
                news_id=db_link.news_id,
                linked_at=db_link.linked_at
            )
        except Exception:
            session.rollback()
            raise
        finally:
            session.close()

    def unlink_from_runner(self, runner_id: str, news_id: int):
        session = self.Session()
        try:
            db_link = session.query(RunnerNewsLinkDB).filter(
                RunnerNewsLinkDB.runner_id == runner_id,
                RunnerNewsLinkDB.news_id == news_id
            ).first()
            if db_link:
                session.delete(db_link)
                session.commit()
        except Exception:
            session.rollback()
            raise
        finally:
            session.close()

    def get_links_for_runner(self, runner_id: str) -> List[RunnerNewsLink]:
        session = self.Session()
        try:
            db_links = session.query(RunnerNewsLinkDB).filter(RunnerNewsLinkDB.runner_id == runner_id).all()
            return [
                RunnerNewsLink(
                    id=db_link.id,
                    runner_id=db_link.runner_id,
                    news_id=db_link.news_id,
                    linked_at=db_link.linked_at
                ) for db_link in db_links
            ]
        finally:
            session.close()

    def get_news_for_runner(self, runner_id: str) -> List[NewsItem]:
        """Return every news row linked to the given runner id."""
        session = self.Session()
        try:
            db_news_list = session.query(NewsDB).join(
                RunnerNewsLinkDB, NewsDB.id == RunnerNewsLinkDB.news_id
            ).filter(RunnerNewsLinkDB.runner_id == runner_id).all()
            return [self._map_to_domain(db_news) for db_news in db_news_list]
        finally:
            session.close()

    def _map_to_domain(self, db_news: NewsDB) -> NewsItem:
        """Translate an ORM row into the domain NewsItem model."""
        return NewsItem(
            id=db_news.id,
            symbol=db_news.symbol,
            title=db_news.title,
            content=db_news.content,
            publisher=db_news.publisher,
            link=db_news.link,
            news_type=db_news.news_type,
            sentiment=db_news.sentiment,
            catalyst_strength=db_news.catalyst_strength,
            provider_publish_time=db_news.provider_publish_time,
            created_at=db_news.created_at,
            updated_at=db_news.updated_at,
            is_active=db_news.is_active
        )
class PortfolioManager:
    """JSON-file-backed portfolio store keyed by ticker symbol."""

    def __init__(self, filepath: str = "portfolio.json"):
        self.filepath = filepath
        self._portfolio = self._load()

    @staticmethod
    def _default_portfolio() -> Dict[str, PortfolioItem]:
        """Fallback portfolio used when no file exists or loading fails.

        Extracted so the default is defined once (it was previously
        duplicated verbatim in two branches of _load).
        """
        return {
            "SPY": PortfolioItem(ticker="SPY", weight=0.5, name="SPDR S&P 500 ETF Trust"),
            "QQQ": PortfolioItem(ticker="QQQ", weight=0.3, name="Invesco QQQ Trust"),
            "TLT": PortfolioItem(ticker="TLT", weight=0.2, name="iShares 20+ Year Treasury Bond ETF")
        }

    def _load(self) -> Dict[str, PortfolioItem]:
        """Read the portfolio file, migrating the legacy {ticker: weight} format."""
        if not os.path.exists(self.filepath):
            return self._default_portfolio()
        try:
            with open(self.filepath, 'r') as f:
                data = json.load(f)

            portfolio = {}
            for ticker, val in data.items():
                if isinstance(val, (int, float)):
                    # Migration from old format: {ticker: weight}
                    portfolio[ticker] = PortfolioItem(ticker=ticker, weight=val)
                elif isinstance(val, dict):
                    portfolio[ticker] = PortfolioItem(**val)
                else:
                    # Unknown shape: keep the ticker but zero its weight.
                    portfolio[ticker] = PortfolioItem(ticker=ticker, weight=0.0)
            return portfolio
        except Exception:
            # Corrupt/unreadable file: fall back rather than crash at startup.
            return self._default_portfolio()

    def save(self):
        """Persist the in-memory portfolio to the JSON file."""
        with open(self.filepath, 'w') as f:
            data = {ticker: item.model_dump() for ticker, item in self._portfolio.items()}
            json.dump(data, f, indent=4)

    def get_portfolio(self) -> Dict[str, PortfolioItem]:
        return self._portfolio

    def update_ticker(self, ticker: str, weight: float, entry_price: float = None, name: str = None):
        """Add or update *ticker*; a non-positive weight removes it. Saves to disk."""
        if weight <= 0:
            if ticker in self._portfolio:
                del self._portfolio[ticker]
        else:
            if not name:
                # Look up the long name via yfinance for new tickers or when
                # the stored name is missing; fall back to the symbol itself.
                if ticker not in self._portfolio or not self._portfolio[ticker].name:
                    try:
                        stock = yf.Ticker(ticker)
                        name = stock.info.get('longName', ticker)
                    except Exception:
                        name = ticker
                else:
                    name = self._portfolio[ticker].name

            # Preserve the existing entry price when not explicitly replaced.
            if entry_price is None and ticker in self._portfolio:
                entry_price = self._portfolio[ticker].entry_price

            self._portfolio[ticker] = PortfolioItem(
                ticker=ticker,
                weight=weight,
                entry_price=entry_price,
                name=name
            )
        self.save()

    def remove_ticker(self, ticker: str):
        """Delete *ticker* from the portfolio (no-op if absent)."""
        if ticker in self._portfolio:
            del self._portfolio[ticker]
            self.save()
class YFinanceRunnersProvider(RunnersProvider):
    """Builds intraday "runner" candidates from Yahoo Finance data."""

    def get_top_movers(self) -> List[str]:
        """Return candidate tickers from Yahoo's gainers/most-active screeners.

        Each screener call is best-effort: any failure (including the helper
        not existing in the installed yfinance version) contributes nothing.
        NOTE(review): confirm yf.get_daily_gainers / yf.get_daily_most_active
        exist in the pinned yfinance release; otherwise this silently returns
        an empty universe.
        """
        tickers = set()
        try:
            gainers = yf.get_daily_gainers()
            if gainers is not None and not gainers.empty:
                tickers.update(gainers.index.tolist())
        except Exception:
            pass

        try:
            active = yf.get_daily_most_active()
            if active is not None and not active.empty:
                tickers.update(active.index.tolist())
        except Exception:
            pass

        return list(tickers)

    def get_market_snapshot(self, tickers: List[str]) -> List[RunnerItem]:
        """Download daily + 5-minute bars and compute momentum metrics per ticker.

        Tickers with missing or malformed data are skipped silently; any
        top-level download failure yields an empty list.
        """
        if not tickers:
            return []

        try:
            results = []

            # 1. Daily baseline (~30 trading days) for relative volume / ADR.
            daily_data = yf.download(tickers, period="35d", interval="1d", progress=False, group_by='ticker')

            # 2. Intraday 5-minute bars for today's metrics.
            intraday_data = yf.download(tickers, period="1d", interval="5m", progress=False, group_by='ticker')

            for ticker in tickers:
                try:
                    # yf.download drops the per-ticker column level for a
                    # single symbol, hence the len(tickers) > 1 switch.
                    t_daily = daily_data[ticker] if len(tickers) > 1 else daily_data
                    t_intra = intraday_data[ticker] if len(tickers) > 1 else intraday_data

                    # Need at least 2 daily rows: iloc[-2] below is taken as
                    # the previous session's close.
                    if t_daily.empty or t_intra.empty or len(t_daily) < 2:
                        continue

                    # Basic price metrics
                    last_price = float(t_intra['Close'].iloc[-1])
                    # NOTE(review): assumes the last daily row is today's
                    # (possibly partial) session — confirm outside market hours.
                    prev_close = float(t_daily['Close'].iloc[-2])
                    open_price = float(t_intra['Open'].iloc[0])

                    pct_change = (last_price - prev_close) / prev_close
                    gap_pct = (open_price - prev_close) / prev_close

                    # Relative volume: cumulative volume today vs 30-day average
                    # (today's daily row may be incomplete while market is open).
                    today_vol = float(t_intra['Volume'].sum())
                    avg_vol_30d = float(t_daily['Volume'].iloc[:-1].tail(30).mean())
                    rel_vol = today_vol / avg_vol_30d if avg_vol_30d > 0 else 1.0

                    # VWAP = Sum(TypicalPrice * Volume) / Sum(Volume)
                    typical_price = (t_intra['High'] + t_intra['Low'] + t_intra['Close']) / 3
                    vwap = (typical_price * t_intra['Volume']).cumsum() / t_intra['Volume'].cumsum()
                    current_vwap = float(vwap.iloc[-1])
                    vwap_dist = (last_price - current_vwap) / current_vwap if current_vwap > 0 else 0.0

                    # Volume acceleration: last 15 minutes vs the day's 5m pace.
                    last_15m_vol = float(t_intra['Volume'].tail(3).sum())
                    avg_5m_vol = float(t_intra['Volume'].mean())
                    vol_acc = last_15m_vol / (avg_5m_vol * 3) if avg_5m_vol > 0 else 1.0

                    # Range expansion: intraday range vs 14-day average daily range.
                    intraday_range = float(t_intra['High'].max() - t_intra['Low'].min())
                    daily_ranges = (t_daily['High'] - t_daily['Low']).iloc[:-1].tail(14)
                    adr = float(daily_ranges.mean())
                    range_exp = intraday_range / adr if adr > 0 else 1.0

                    # Per-ticker info fetch for market cap / float (slow; in a
                    # real app this would be cached or sourced elsewhere).
                    info = yf.Ticker(ticker).info
                    mcap = info.get('marketCap')
                    # yfinance float is often missing or named differently
                    float_s = info.get('floatShares')

                    results.append(RunnerItem(
                        ticker=ticker,
                        price=last_price,
                        change=last_price - prev_close,
                        pct_change=pct_change * 100,
                        volume=int(today_vol),
                        relative_volume=rel_vol,
                        gap_pct=gap_pct * 100,
                        vwap_dist=vwap_dist * 100,
                        # Annualized from 5m bar std (78 five-minute bars/day).
                        volatility=float(t_intra['Close'].pct_change().std() * np.sqrt(252 * 78)),
                        range_expansion=range_exp,
                        volume_acceleration=vol_acc,
                        market_cap=mcap,
                        float_size=float_s,
                        timestamp=datetime.now()
                    ))
                except Exception:
                    # Skip tickers whose frames are malformed; keep the rest.
                    continue
            return results
        except Exception:
            return []