diff --git a/eval_protocol/adapters/__init__.py b/eval_protocol/adapters/__init__.py index fc04237b..57757901 100644 --- a/eval_protocol/adapters/__init__.py +++ b/eval_protocol/adapters/__init__.py @@ -6,6 +6,7 @@ Available adapters: - LangfuseAdapter: Pull data from Langfuse deployments - HuggingFaceAdapter: Load datasets from HuggingFace Hub +- BigQueryAdapter: Query data from Google BigQuery - Braintrust integration (legacy) - TRL integration (legacy) """ @@ -13,35 +14,56 @@ # Conditional imports based on available dependencies try: from .langfuse import LangfuseAdapter, create_langfuse_adapter + __all__ = ["LangfuseAdapter", "create_langfuse_adapter"] except ImportError: __all__ = [] try: from .huggingface import ( - HuggingFaceAdapter, - create_huggingface_adapter, + HuggingFaceAdapter, create_gsm8k_adapter, + create_huggingface_adapter, create_math_adapter, ) - __all__.extend([ - "HuggingFaceAdapter", - "create_huggingface_adapter", - "create_gsm8k_adapter", - "create_math_adapter", - ]) + + __all__.extend( + [ + "HuggingFaceAdapter", + "create_huggingface_adapter", + "create_gsm8k_adapter", + "create_math_adapter", + ] + ) +except ImportError: + pass + +try: + from .bigquery import ( + BigQueryAdapter, + create_bigquery_adapter, + ) + + __all__.extend( + [ + "BigQueryAdapter", + "create_bigquery_adapter", + ] + ) except ImportError: pass # Legacy adapters (always available) try: from .braintrust import reward_fn_to_scorer, scorer_to_reward_fn + __all__.extend(["scorer_to_reward_fn", "reward_fn_to_scorer"]) except ImportError: pass try: from .trl import create_trl_adapter + __all__.extend(["create_trl_adapter"]) except ImportError: pass diff --git a/eval_protocol/adapters/bigquery.py b/eval_protocol/adapters/bigquery.py new file mode 100644 index 00000000..1275b1e1 --- /dev/null +++ b/eval_protocol/adapters/bigquery.py @@ -0,0 +1,285 @@ +"""Google BigQuery adapter for Eval Protocol. + +This adapter allows querying data from Google BigQuery tables and converting it +to EvaluationRow format for use in evaluation pipelines. +""" + +import logging +from typing import Any, Callable, Dict, Iterator, List, Optional, Union + +from eval_protocol.models import CompletionParams, EvaluationRow, InputMetadata, Message + +logger = logging.getLogger(__name__) + +try: + from google.auth.exceptions import DefaultCredentialsError + from google.cloud import bigquery + from google.cloud.exceptions import Forbidden, NotFound + from google.oauth2 import service_account + + BIGQUERY_AVAILABLE = True +except ImportError: + BIGQUERY_AVAILABLE = False + logger.warning("Google Cloud BigQuery not installed. Install with: pip install 'eval-protocol[bigquery]'") + +# Type alias for transformation function +TransformFunction = Callable[[Dict[str, Any]], Dict[str, Any]] + + +class BigQueryAdapter: + """Adapter to query data from Google BigQuery and convert to EvaluationRow format. + + This adapter connects to Google BigQuery, executes SQL queries, and applies + a user-provided transformation function to convert each row to the format + expected by EvaluationRow. + + The transformation function should take a BigQuery row dictionary and return: + { + 'messages': List[Dict] - list of message dictionaries with 'role' and 'content' + 'ground_truth': Optional[str] - expected answer/output + 'metadata': Optional[Dict] - any additional metadata to preserve + 'tools': Optional[List[Dict]] - tool definitions for tool calling scenarios + } + """ + + def __init__( + self, + transform_fn: TransformFunction, + dataset_id: Optional[str] = None, + credentials_path: Optional[str] = None, + location: Optional[str] = None, + **client_kwargs, + ): + """Initialize the BigQuery adapter. + + Args: + transform_fn: Function to transform BigQuery rows to evaluation format + dataset_id: Google Cloud project ID (if None, uses default from environment) + credentials_path: Path to service account JSON file (if None, uses default auth) + location: Default location for BigQuery jobs + **client_kwargs: Additional arguments to pass to BigQuery client + + Raises: + ImportError: If google-cloud-bigquery is not installed + DefaultCredentialsError: If authentication fails + """ + if not BIGQUERY_AVAILABLE: + raise ImportError( + "Google Cloud BigQuery not installed. Install with: pip install 'eval-protocol[bigquery]'" + ) + + self.transform_fn = transform_fn + self.dataset_id = dataset_id + self.location = location + + # Initialize BigQuery client + try: + client_args = {} + if dataset_id: + client_args["project"] = dataset_id + if credentials_path: + credentials = service_account.Credentials.from_service_account_file(credentials_path) + client_args["credentials"] = credentials + if location: + client_args["location"] = location + + client_args.update(client_kwargs) + self.client = bigquery.Client(**client_args) + + except DefaultCredentialsError as e: + logger.error("Failed to authenticate with BigQuery: %s", e) + raise + except Exception as e: + logger.error("Failed to initialize BigQuery client: %s", e) + raise + + def get_evaluation_rows( + self, + query: str, + query_params: Optional[List[Union[bigquery.ScalarQueryParameter, bigquery.ArrayQueryParameter]]] = None, + limit: Optional[int] = None, + offset: int = 0, + model_name: str = "gpt-3.5-turbo", + temperature: float = 0.0, + max_tokens: Optional[int] = None, + **completion_params_kwargs, + ) -> Iterator[EvaluationRow]: + """Execute BigQuery query and convert results to EvaluationRow format. + + Args: + query: SQL query to execute + query_params: Optional list of query parameters for parameterized queries + limit: Maximum number of rows to return (applied after BigQuery query) + offset: Number of rows to skip (applied after BigQuery query) + model_name: Model name for completion parameters + temperature: Temperature for completion parameters + max_tokens: Max tokens for completion parameters + **completion_params_kwargs: Additional completion parameters + + Yields: + EvaluationRow: Converted evaluation rows + + Raises: + NotFound: If the query references non-existent tables/datasets + Forbidden: If insufficient permissions + """ + try: + # Configure query job + job_config = bigquery.QueryJobConfig() + if query_params: + job_config.query_parameters = query_params + if self.location: + job_config.location = self.location + + query_job = self.client.query(query, job_config=job_config) + + results = query_job.result() + + completion_params: CompletionParams = { + "model": model_name, + "temperature": temperature, + "max_tokens": max_tokens, + **completion_params_kwargs, + } + + # Convert rows with offset/limit + row_count = 0 + processed_count = 0 + + for raw_row in results: + # Apply offset + if row_count < offset: + row_count += 1 + continue + + # Apply limit + if limit is not None and processed_count >= limit: + break + + try: + eval_row = self._convert_row_to_evaluation_row(raw_row, processed_count, completion_params) + if eval_row: + yield eval_row + processed_count += 1 + + except (AttributeError, ValueError, KeyError) as e: + logger.warning("Failed to convert row %d: %s", row_count, e) + + row_count += 1 + + except (NotFound, Forbidden) as e: + logger.error("BigQuery access error: %s", e) + raise + except Exception as e: + logger.error("Error executing BigQuery query: %s", e) + raise + + def _convert_row_to_evaluation_row( + self, + raw_row: Dict[str, Any], + row_index: int, + completion_params: CompletionParams, + ) -> EvaluationRow: + """Convert a single BigQuery row to EvaluationRow format. + + Args: + raw_row: BigQuery row dictionary + row_index: Index of the row in the result set + completion_params: Completion parameters to use + + Returns: + EvaluationRow object or None if conversion fails + """ + # Apply user transformation + transformed = self.transform_fn(raw_row) + + # Validate required fields + if "messages" not in transformed: + raise ValueError("Transform function must return 'messages' field") + + # Convert message dictionaries to Message objects + messages = [] + for msg_dict in transformed["messages"]: + if not isinstance(msg_dict, dict): + raise ValueError("Each message must be a dictionary") + if "role" not in msg_dict: + raise ValueError("Each message must have a 'role' field") + + messages.append( + Message( + role=msg_dict["role"], + content=msg_dict.get("content"), + name=msg_dict.get("name"), + tool_call_id=msg_dict.get("tool_call_id"), + tool_calls=msg_dict.get("tool_calls"), + function_call=msg_dict.get("function_call"), + ) + ) + + # Extract other fields + ground_truth = transformed.get("ground_truth") + tools = transformed.get("tools") + user_metadata = transformed.get("metadata", {}) + + # Create dataset info + dataset_info = { + "source": "bigquery", + "dataset_id": self.dataset_id or self.client.project, + "row_index": row_index, + "transform_function": ( + self.transform_fn.__name__ if hasattr(self.transform_fn, "__name__") else "anonymous" + ), + } + + # Add user metadata + dataset_info.update(user_metadata) + + # Add original row data (with prefix to avoid conflicts) + for key, value in raw_row.items(): + # Convert BigQuery types to JSON-serializable types + dataset_info[f"original_{key}"] = value + + # Create input metadata (following HuggingFace pattern) + input_metadata = InputMetadata( + row_id=f"{self.dataset_id}_{row_index}", + completion_params=completion_params, + dataset_info=dataset_info, + session_data={ + "dataset_source": "bigquery", + }, + ) + + return EvaluationRow( + messages=messages, + tools=tools, + input_metadata=input_metadata, + ground_truth=str(ground_truth) if ground_truth is not None else None, + ) + + +def create_bigquery_adapter( + transform_fn: TransformFunction, + dataset_id: Optional[str] = None, + credentials_path: Optional[str] = None, + location: Optional[str] = None, + **client_kwargs, +) -> BigQueryAdapter: + """Factory function to create a BigQuery adapter. + + Args: + transform_fn: Function to transform BigQuery rows to evaluation format + dataset_id: Google Cloud project ID + credentials_path: Path to service account JSON file + location: Default location for BigQuery jobs + **client_kwargs: Additional arguments for BigQuery client + + Returns: + BigQueryAdapter instance + """ + return BigQueryAdapter( + transform_fn=transform_fn, + dataset_id=dataset_id, + credentials_path=credentials_path, + location=location, + **client_kwargs, + ) diff --git a/examples/adapters/README.md b/examples/adapters/README.md index f51cd387..591bbbb2 100644 --- a/examples/adapters/README.md +++ b/examples/adapters/README.md @@ -43,6 +43,34 @@ Loads datasets from HuggingFace Hub and converts them to EvaluationRow format. pip install 'eval-protocol[huggingface]' ``` +### 3. BigQuery Adapter (`bigquery_example.py`) + +Queries data from Google BigQuery tables and converts them to EvaluationRow format. + +**Features:** +- Execute custom SQL queries against BigQuery datasets +- Support for parameterized queries and batch processing +- Built-in convenience adapters for conversation and Q&A data +- Rich metadata preservation including query information +- Integration with Google Cloud authentication +- Schema introspection and dataset exploration + +**Prerequisites:** +```bash +pip install 'eval-protocol[bigquery]' +``` + +**Environment Variables:** +```bash +export GOOGLE_CLOUD_PROJECT="your-project-id" +export GOOGLE_APPLICATION_CREDENTIALS="/path/to/service-account.json" # optional +``` + +**Alternative Authentication:** +```bash +gcloud auth application-default login +``` + ## Running the Examples ### Basic Usage @@ -51,9 +79,12 @@ pip install 'eval-protocol[huggingface]' # Run Langfuse example python examples/adapters/langfuse_example.py -# Run HuggingFace example +# Run HuggingFace example python examples/adapters/huggingface_example.py +# Run BigQuery example +python examples/adapters/bigquery_example.py + # Run GSM8K replacement example python examples/adapters/gsm8k_replacement_example.py ``` @@ -66,6 +97,11 @@ export LANGFUSE_PUBLIC_KEY="pk_..." export LANGFUSE_SECRET_KEY="sk_..." python examples/adapters/langfuse_example.py +# Set up Google Cloud credentials for BigQuery +export GOOGLE_CLOUD_PROJECT="your-project-id" +export GOOGLE_APPLICATION_CREDENTIALS="/path/to/service-account.json" # optional +python examples/adapters/bigquery_example.py + # HuggingFace works without credentials for public datasets python examples/adapters/huggingface_example.py ``` @@ -100,7 +136,7 @@ def custom_gsm8k_transform(row): from eval_protocol.adapters.huggingface import create_huggingface_adapter custom_adapter = create_huggingface_adapter( dataset_id="gsm8k", - config_name="main", + config_name="main", transform_fn=custom_gsm8k_transform ) ``` @@ -150,7 +186,7 @@ rows = list(adapter.get_evaluation_rows(limit=10)) for row in rows: # Add model response (you would generate this) row.messages.append(Message(role="assistant", content="...")) - + # Evaluate result = math_reward(messages=row.messages, ground_truth=row.ground_truth) print(f"Score: {result.score}") @@ -222,7 +258,7 @@ class MyCustomAdapter: def __init__(self, **config): # Initialize your data source connection pass - + def get_evaluation_rows(self, **kwargs) -> Iterator[EvaluationRow]: # Fetch data and convert to EvaluationRow format pass @@ -272,4 +308,4 @@ We welcome contributions of new adapters! Popular integrations that would be val - **File format adapters**: Parquet, Excel, etc. - **Monitoring platform adapters**: DataDog, New Relic, etc. -See the adapter contributing guide for detailed instructions. \ No newline at end of file +See the adapter contributing guide for detailed instructions. diff --git a/pyproject.toml b/pyproject.toml index 4026ce9e..73105fd5 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -109,10 +109,18 @@ huggingface = [ "datasets>=2.0.0", "transformers>=4.0.0", ] +bigquery = [ + "google-cloud-bigquery>=3.0.0", + "google-auth>=2.0.0", + "google-auth-oauthlib>=1.0.0", +] adapters = [ "langfuse>=2.0.0", "datasets>=2.0.0", "transformers>=4.0.0", + "google-cloud-bigquery>=3.0.0", + "google-auth>=2.0.0", + "google-auth-oauthlib>=1.0.0", ] svgbench = [ "selenium>=4.0.0", diff --git a/tests/test_adapters_e2e.py b/tests/test_adapters_e2e.py index a598dff7..72449e8b 100644 --- a/tests/test_adapters_e2e.py +++ b/tests/test_adapters_e2e.py @@ -6,31 +6,34 @@ """ import os -import pytest from datetime import datetime, timedelta -from typing import Dict, Any +from typing import Any, Dict + +import pytest -from eval_protocol.models import EvaluationRow, Message, InputMetadata +from eval_protocol.models import EvaluationRow, InputMetadata, Message class TestLangfuseAdapterE2E: """End-to-end tests for Langfuse adapter with real deployment.""" - + def _get_langfuse_credentials(self): """Get Langfuse credentials from environment.""" public_key = os.getenv("LANGFUSE_PUBLIC_KEY") secret_key = os.getenv("LANGFUSE_SECRET_KEY") host = os.getenv("LANGFUSE_HOST", "https://langfuse-web-prod-zfdbl7ykrq-uc.a.run.app") project_id = os.getenv("LANGFUSE_PROJECT_ID", "cmdj5yxhk0006s6022cyi0prv") - + return public_key, secret_key, host, project_id - + @pytest.mark.skipif( - not all([ - os.getenv("LANGFUSE_PUBLIC_KEY"), - os.getenv("LANGFUSE_SECRET_KEY"), - ]), - reason="Langfuse credentials not available in environment" + not all( + [ + os.getenv("LANGFUSE_PUBLIC_KEY"), + os.getenv("LANGFUSE_SECRET_KEY"), + ] + ), + reason="Langfuse credentials not available in environment", ) def test_langfuse_adapter_real_connection(self): """Test that we can connect to real Langfuse deployment and pull data.""" @@ -38,9 +41,9 @@ def test_langfuse_adapter_real_connection(self): from eval_protocol.adapters.langfuse import create_langfuse_adapter except ImportError: pytest.skip("Langfuse dependencies not installed") - + public_key, secret_key, host, project_id = self._get_langfuse_credentials() - + # Create adapter adapter = create_langfuse_adapter( public_key=public_key, @@ -48,40 +51,47 @@ def test_langfuse_adapter_real_connection(self): host=host, project_id=project_id, ) - + # Test basic connection by trying to get a small number of traces rows = list(adapter.get_evaluation_rows(limit=3)) - + # Verify we got some data assert isinstance(rows, list), "Should return a list of rows" print(f"Retrieved {len(rows)} evaluation rows from Langfuse") - + # Verify each row is properly formatted for i, row in enumerate(rows): assert isinstance(row, EvaluationRow), f"Row {i} should be EvaluationRow" assert isinstance(row.messages, list), f"Row {i} should have messages list" assert len(row.messages) > 0, f"Row {i} should have at least one message" - + # Verify messages are properly formatted for j, msg in enumerate(row.messages): assert isinstance(msg, Message), f"Row {i} message {j} should be Message object" - assert hasattr(msg, 'role'), f"Row {i} message {j} should have role" - assert msg.role in ['user', 'assistant', 'system', 'tool'], f"Row {i} message {j} has invalid role: {msg.role}" - + assert hasattr(msg, "role"), f"Row {i} message {j} should have role" + assert msg.role in [ + "user", + "assistant", + "system", + "tool", + ], f"Row {i} message {j} has invalid role: {msg.role}" + # Verify metadata if row.input_metadata: assert isinstance(row.input_metadata, InputMetadata), f"Row {i} should have InputMetadata" assert row.input_metadata.row_id, f"Row {i} should have row_id" print(f" Row {i}: ID={row.input_metadata.row_id}, Messages={len(row.messages)}") - + print(f" Row {i}: {len(row.messages)} messages, Tools={'Yes' if row.tools else 'No'}") - + @pytest.mark.skipif( - not all([ - os.getenv("LANGFUSE_PUBLIC_KEY"), - os.getenv("LANGFUSE_SECRET_KEY"), - ]), - reason="Langfuse credentials not available" + not all( + [ + os.getenv("LANGFUSE_PUBLIC_KEY"), + os.getenv("LANGFUSE_SECRET_KEY"), + ] + ), + reason="Langfuse credentials not available", ) def test_langfuse_adapter_with_filters(self): """Test Langfuse adapter with various filters.""" @@ -89,46 +99,52 @@ def test_langfuse_adapter_with_filters(self): from eval_protocol.adapters.langfuse import create_langfuse_adapter except ImportError: pytest.skip("Langfuse dependencies not installed") - + public_key, secret_key, host, project_id = self._get_langfuse_credentials() - + adapter = create_langfuse_adapter( public_key=public_key, secret_key=secret_key, host=host, project_id=project_id, ) - + # Test with time filter (last 7 days) - recent_rows = list(adapter.get_evaluation_rows( - limit=5, - from_timestamp=datetime.now() - timedelta(days=7), - include_tool_calls=True, - )) - + recent_rows = list( + adapter.get_evaluation_rows( + limit=5, + from_timestamp=datetime.now() - timedelta(days=7), + include_tool_calls=True, + ) + ) + print(f"Recent rows (last 7 days): {len(recent_rows)}") - + # Verify tool calling data is preserved tool_calling_rows = [row for row in recent_rows if row.tools] print(f"Rows with tool definitions: {len(tool_calling_rows)}") - + # Test specific filtering try: # This might not return data if no traces match, which is fine - tagged_rows = list(adapter.get_evaluation_rows( - limit=2, - tags=["production"], # May not exist, that's OK - )) + tagged_rows = list( + adapter.get_evaluation_rows( + limit=2, + tags=["production"], # May not exist, that's OK + ) + ) print(f"Tagged rows: {len(tagged_rows)}") except Exception as e: print(f"Tagged query failed (expected if no tags): {e}") - + @pytest.mark.skipif( - not all([ - os.getenv("LANGFUSE_PUBLIC_KEY"), - os.getenv("LANGFUSE_SECRET_KEY"), - ]), - reason="Langfuse credentials not available" + not all( + [ + os.getenv("LANGFUSE_PUBLIC_KEY"), + os.getenv("LANGFUSE_SECRET_KEY"), + ] + ), + reason="Langfuse credentials not available", ) def test_langfuse_conversation_analysis(self): """Test analysis of conversation types from Langfuse.""" @@ -136,51 +152,51 @@ def test_langfuse_conversation_analysis(self): from eval_protocol.adapters.langfuse import create_langfuse_adapter except ImportError: pytest.skip("Langfuse dependencies not installed") - + public_key, secret_key, host, project_id = self._get_langfuse_credentials() - + adapter = create_langfuse_adapter( public_key=public_key, secret_key=secret_key, host=host, project_id=project_id, ) - + # Get more data for analysis rows = list(adapter.get_evaluation_rows(limit=10, include_tool_calls=True)) - + # Analyze conversation patterns chat_only = [] tool_calling = [] multi_turn = [] - + for row in rows: # Check for tool calling has_tools = ( - row.tools or - any(hasattr(msg, 'tool_calls') and msg.tool_calls for msg in row.messages) or - any(msg.role == 'tool' for msg in row.messages) + row.tools + or any(hasattr(msg, "tool_calls") and msg.tool_calls for msg in row.messages) + or any(msg.role == "tool" for msg in row.messages) ) - + if has_tools: tool_calling.append(row) else: chat_only.append(row) - + # Check for multi-turn conversations if len(row.messages) > 2: # More than user + assistant multi_turn.append(row) - + print(f"Analysis of {len(rows)} conversations:") print(f" Chat-only: {len(chat_only)}") - print(f" Tool calling: {len(tool_calling)}") + print(f" Tool calling: {len(tool_calling)}") print(f" Multi-turn: {len(multi_turn)}") - + # Show example of each type if available if chat_only: row = chat_only[0] print(f" Example chat: {len(row.messages)} messages") - + if tool_calling: row = tool_calling[0] print(f" Example tool calling: {len(row.messages)} messages, {len(row.tools or [])} tools") @@ -188,220 +204,514 @@ def test_langfuse_conversation_analysis(self): class TestHuggingFaceAdapterE2E: """End-to-end tests for HuggingFace adapter with real datasets.""" - + def test_gsm8k_adapter_real_data(self): """Test loading real GSM8K data and converting to EvaluationRow.""" try: from eval_protocol.adapters.huggingface import create_huggingface_adapter except ImportError: pytest.skip("HuggingFace dependencies not installed") - + def gsm8k_transform(row: Dict[str, Any]) -> Dict[str, Any]: """Transform GSM8K row to our format.""" return { - 'messages': [ - {'role': 'system', 'content': 'You are a helpful assistant that solves math problems step by step.'}, - {'role': 'user', 'content': row['question']}, + "messages": [ + { + "role": "system", + "content": "You are a helpful assistant that solves math problems step by step.", + }, + {"role": "user", "content": row["question"]}, ], - 'ground_truth': row['answer'], - 'metadata': { - 'dataset': 'gsm8k', - 'original_question': row['question'], - 'original_answer': row['answer'], - } + "ground_truth": row["answer"], + "metadata": { + "dataset": "gsm8k", + "original_question": row["question"], + "original_answer": row["answer"], + }, } - + # Create adapter with transform function adapter = create_huggingface_adapter( dataset_id="gsm8k", config_name="main", transform_fn=gsm8k_transform, ) - + # Test loading data rows = list(adapter.get_evaluation_rows(split="test", limit=5)) - + # Verify we got data assert len(rows) > 0, "Should retrieve some GSM8K data" print(f"Retrieved {len(rows)} GSM8K evaluation rows") - + # Verify each row is properly formatted for i, row in enumerate(rows): assert isinstance(row, EvaluationRow), f"Row {i} should be EvaluationRow" assert isinstance(row.messages, list), f"Row {i} should have messages" assert len(row.messages) >= 2, f"Row {i} should have system + user messages" - + # Check system prompt system_msg = row.messages[0] - assert system_msg.role == 'system', f"Row {i} first message should be system" - assert 'math problems' in system_msg.content.lower(), f"Row {i} should have math system prompt" - + assert system_msg.role == "system", f"Row {i} first message should be system" + assert "math problems" in system_msg.content.lower(), f"Row {i} should have math system prompt" + # Check user question user_msg = row.messages[1] - assert user_msg.role == 'user', f"Row {i} second message should be user" + assert user_msg.role == "user", f"Row {i} second message should be user" assert len(user_msg.content) > 0, f"Row {i} should have non-empty question" - + # Check ground truth assert row.ground_truth, f"Row {i} should have ground truth answer" - + # Check metadata assert row.input_metadata, f"Row {i} should have metadata" assert row.input_metadata.dataset_info, f"Row {i} should have dataset info" - + print(f" Row {i}: Question length={len(user_msg.content)}, Answer length={len(row.ground_truth)}") - + def test_math_dataset_real_data(self): """Test loading real MATH competition dataset.""" try: from eval_protocol.adapters.huggingface import create_huggingface_adapter except ImportError: pytest.skip("HuggingFace dependencies not installed") - + def math_transform(row: Dict[str, Any]) -> Dict[str, Any]: """Transform MATH dataset row.""" return { - 'messages': [ - {'role': 'system', 'content': 'You are an expert mathematician. Solve this step by step.'}, - {'role': 'user', 'content': row['problem']}, + "messages": [ + {"role": "system", "content": "You are an expert mathematician. Solve this step by step."}, + {"role": "user", "content": row["problem"]}, ], - 'ground_truth': row['solution'], - 'metadata': { - 'dataset': 'hendrycks_math', - 'type': row.get('type', 'unknown'), - 'level': row.get('level', 'unknown'), - 'original_problem': row['problem'], - 'original_solution': row['solution'], - } + "ground_truth": row["solution"], + "metadata": { + "dataset": "hendrycks_math", + "type": row.get("type", "unknown"), + "level": row.get("level", "unknown"), + "original_problem": row["problem"], + "original_solution": row["solution"], + }, } - + # Create adapter adapter = create_huggingface_adapter( dataset_id="SuperSecureHuman/competition_math_hf_dataset", transform_fn=math_transform, ) - + # Test loading data rows = list(adapter.get_evaluation_rows(split="test", limit=3)) - + # Verify data assert len(rows) > 0, "Should retrieve MATH dataset data" print(f"Retrieved {len(rows)} MATH dataset evaluation rows") - + for i, row in enumerate(rows): assert isinstance(row, EvaluationRow), f"Row {i} should be EvaluationRow" assert len(row.messages) >= 2, f"Row {i} should have system + user messages" assert row.ground_truth, f"Row {i} should have solution" - + # Check for MATH-specific metadata dataset_info = row.input_metadata.dataset_info - assert 'type' in dataset_info, f"Row {i} should have problem type" - assert 'level' in dataset_info, f"Row {i} should have difficulty level" - + assert "type" in dataset_info, f"Row {i} should have problem type" + assert "level" in dataset_info, f"Row {i} should have difficulty level" + print(f" Row {i}: Type={dataset_info.get('type')}, Level={dataset_info.get('level')}") - + def test_custom_dataset_transform(self): """Test adapter with a completely custom transformation.""" try: from eval_protocol.adapters.huggingface import create_huggingface_adapter except ImportError: pytest.skip("HuggingFace dependencies not installed") - + def squad_transform(row: Dict[str, Any]) -> Dict[str, Any]: """Custom transform for SQuAD dataset.""" - context = row['context'] - question = row['question'] - answers = row['answers'] - + context = row["context"] + question = row["question"] + answers = row["answers"] + # Get first answer - answer_text = answers['text'][0] if answers['text'] else "No answer" - + answer_text = answers["text"][0] if answers["text"] else "No answer" + return { - 'messages': [ - {'role': 'system', 'content': 'Answer the question based on the given context.'}, - {'role': 'user', 'content': f"Context: {context}\n\nQuestion: {question}"}, + "messages": [ + {"role": "system", "content": "Answer the question based on the given context."}, + {"role": "user", "content": f"Context: {context}\n\nQuestion: {question}"}, ], - 'ground_truth': answer_text, - 'metadata': { - 'dataset': 'squad', - 'context_length': len(context), - 'question_length': len(question), - 'num_answers': len(answers['text']), - } + "ground_truth": answer_text, + "metadata": { + "dataset": "squad", + "context_length": len(context), + "question_length": len(question), + "num_answers": len(answers["text"]), + }, } - + # Create adapter for SQuAD adapter = create_huggingface_adapter( dataset_id="squad", transform_fn=squad_transform, ) - + # Test loading rows = list(adapter.get_evaluation_rows(split="validation", limit=2)) - + assert len(rows) > 0, "Should retrieve SQuAD data" print(f"Retrieved {len(rows)} SQuAD evaluation rows") - + for i, row in enumerate(rows): assert isinstance(row, EvaluationRow), f"Row {i} should be EvaluationRow" - user_msg = next(msg for msg in row.messages if msg.role == 'user') - assert 'Context:' in user_msg.content, f"Row {i} should have context" - assert 'Question:' in user_msg.content, f"Row {i} should have question" - + user_msg = next(msg for msg in row.messages if msg.role == "user") + assert "Context:" in user_msg.content, f"Row {i} should have context" + assert "Question:" in user_msg.content, f"Row {i} should have question" + dataset_info = row.input_metadata.dataset_info print(f" Row {i}: Context length={dataset_info.get('context_length')}") +class TestBigQueryAdapterE2E: + """End-to-end tests for BigQuery adapter with real data sources.""" + + def _get_bigquery_credentials(self): + """Get BigQuery credentials from environment.""" + project_id = os.getenv("GOOGLE_CLOUD_PROJECT") + credentials_path = os.getenv("GOOGLE_APPLICATION_CREDENTIALS") + + return project_id, credentials_path + + @pytest.mark.skipif( + not os.getenv("GOOGLE_CLOUD_PROJECT"), reason="Google Cloud project not configured in environment" + ) + def test_bigquery_adapter_real_connection(self): + """Test that we can connect to real BigQuery and execute queries.""" + try: + from eval_protocol.adapters.bigquery import create_bigquery_adapter + except ImportError: + pytest.skip("BigQuery dependencies not installed") + + project_id, credentials_path = self._get_bigquery_credentials() + + # Define a simple transform for testing + def test_transform(row: Dict[str, Any]) -> Dict[str, Any]: + """Transform test query results to evaluation format.""" + return { + "messages": [ + {"role": "system", "content": "You are a helpful assistant."}, + {"role": "user", "content": str(row.get("text", "Test query"))}, + ], + "ground_truth": str(row.get("label", "test")), + "metadata": { + "source": "bigquery", + "row_data": dict(row), + }, + } + + # Create adapter + adapter = create_bigquery_adapter( + transform_fn=test_transform, + dataset_id=project_id, + credentials_path=credentials_path, + ) + + # Test with a simple query that should work on any BigQuery project + # Using INFORMATION_SCHEMA which is available in all projects + query = """ + SELECT + 'test_text' as text, + 'test_label' as label, + CURRENT_TIMESTAMP() as created_at, + 1 as id + LIMIT 3 + """ + + # Execute query and get rows + rows = list( + adapter.get_evaluation_rows( + query=query, + limit=2, + model_name="gpt-3.5-turbo", + temperature=0.0, + ) + ) + + # Verify we got data + assert len(rows) > 0, "Should retrieve data from BigQuery" + print(f"Retrieved {len(rows)} evaluation rows from BigQuery") + + # Verify each row is properly formatted + for i, row in enumerate(rows): + assert isinstance(row, EvaluationRow), f"Row {i} should be EvaluationRow" + assert isinstance(row.messages, list), f"Row {i} should have messages list" + assert len(row.messages) >= 2, f"Row {i} should have system + user messages" + + # Check system and user messages + system_msg = row.messages[0] + user_msg = row.messages[1] + assert system_msg.role == "system", f"Row {i} first message should be system" + assert user_msg.role == "user", f"Row {i} second message should be user" + + # Verify metadata + assert row.input_metadata, f"Row {i} should have metadata" + assert row.input_metadata.row_id, f"Row {i} should have row_id" + + # Check BigQuery-specific metadata + dataset_info = row.input_metadata.dataset_info + assert dataset_info["source"] == "bigquery", f"Row {i} should have BigQuery source" + + print(f" Row {i}: ID={row.input_metadata.row_id}, Messages={len(row.messages)}") + + @pytest.mark.skipif(not os.getenv("GOOGLE_CLOUD_PROJECT"), reason="Google Cloud project not configured") + def test_bigquery_advanced_features(self): + """Test advanced BigQuery adapter features like parameterized queries.""" + try: + from google.cloud import bigquery + + from eval_protocol.adapters.bigquery import create_bigquery_adapter + except ImportError: + pytest.skip("BigQuery dependencies not installed") + + project_id, credentials_path = self._get_bigquery_credentials() + + def transform_fn(row): + return { + "messages": [{"role": "user", "content": str(row["content"])}], + "ground_truth": str(row["label"]), + "metadata": {"category": row.get("category", "unknown")}, + } + + adapter = create_bigquery_adapter( + transform_fn=transform_fn, + dataset_id=project_id, + credentials_path=credentials_path, + ) + + # Test parameterized query + query = """ + SELECT + @prefix || ' example content' as content, + 'test_label' as label, + @category as category + """ + + query_params = [ + bigquery.ScalarQueryParameter("prefix", "STRING", "BigQuery"), + bigquery.ScalarQueryParameter("category", "STRING", "test_data"), + ] + + rows = list( + adapter.get_evaluation_rows( + query=query, + query_params=query_params, + limit=1, + ) + ) + + assert len(rows) == 1, "Should retrieve parameterized query result" + row = rows[0] + + user_msg = row.messages[0] + assert "BigQuery example content" in user_msg.content + assert row.ground_truth == "test_label" + + print(f"Parameterized query test: '{user_msg.content}' -> '{row.ground_truth}'") + + @pytest.mark.skipif( + not os.getenv("GOOGLE_CLOUD_PROJECT"), reason="Google Cloud project required to query public datasets" + ) + def test_bigquery_public_dataset_google_books_ngrams(self): + """Test BigQuery adapter with a public dataset to test specific logic.""" + try: + from eval_protocol.adapters.bigquery import create_bigquery_adapter + except ImportError: + pytest.skip("BigQuery dependencies not installed") + + # Get user's project credentials (needed to run the query job) + project_id, credentials_path = self._get_bigquery_credentials() + + def google_books_transform(row: Dict[str, Any]) -> Dict[str, Any]: + """Transform Google Books ngrams data to evaluation format.""" + term = str(row.get("term", "")) + term_frequency = row.get("term_frequency", 0) + document_frequency = row.get("document_frequency", 0) + tokens = row.get("tokens", []) # This is a REPEATED field (array) + has_tag = row.get("has_tag", False) + years = row.get("years", []) # This is a REPEATED RECORD (array of objects) + + # Create an educational question about the term + system_prompt = ( + """You are a linguistics expert who helps explain word usage patterns from Google Books data.""" + ) + + # Create a question about the term's usage + if tokens and len(tokens) > 0: + tokens_str = ", ".join(str(token) for token in tokens[:3]) # Take first 3 tokens + question = f"What can you tell me about the term '{term}' and its linguistic tokens: {tokens_str}?" + else: + question = f"What can you tell me about the Chinese term '{term}' based on its usage patterns?" + + # Create ground truth based on frequency data + frequency_desc = ( + "high frequency" + if term_frequency > 1000 + else "moderate frequency" if term_frequency > 100 else "low frequency" + ) + document_desc = ( + f"appears in {document_frequency} documents" if document_frequency > 0 else "rare occurrence" + ) + + ground_truth = ( + f"The term '{term}' has {frequency_desc} usage ({term_frequency} occurrences) and {document_desc}." + ) + if has_tag: + ground_truth += " This term has special linguistic tags." + + return { + "messages": [ + {"role": "system", "content": system_prompt}, + {"role": "user", "content": question}, + ], + "ground_truth": ground_truth, + "metadata": { + "dataset": "google_books_ngrams_chi_sim", + "term": term, + "term_frequency": term_frequency, + "document_frequency": document_frequency, + "num_tokens": len(tokens) if tokens else 0, + "has_tag": has_tag, + "num_year_records": len(years) if years else 0, + "tokens_sample": tokens[:3] if tokens else [], # Store first 3 tokens as sample + }, + } + + # Create adapter - use YOUR project to run the job, but query PUBLIC data + adapter = create_bigquery_adapter( + transform_fn=google_books_transform, + dataset_id=project_id, # YOUR project (to run the job) + credentials_path=credentials_path, + ) + + # Query the public Google Books ngrams dataset (full table reference in SQL) + query = """ + SELECT + term, + term_frequency, + document_frequency, + tokens, + has_tag, + years + FROM `bigquery-public-data.google_books_ngrams_2020.chi_sim_1` + WHERE term_frequency > 100 + AND document_frequency > 5 + AND LENGTH(term) >= 2 + ORDER BY term_frequency DESC + LIMIT 10 + """ + + # Execute query and get rows + rows = list( + adapter.get_evaluation_rows( + query=query, + limit=3, + model_name="gpt-4", + temperature=0.0, + ) + ) + + # Verify we got data + assert len(rows) > 0, "Should retrieve data from Google Books ngrams dataset" + print(f"Retrieved {len(rows)} evaluation rows from Google Books ngrams") + + # Verify each row is properly formatted + for i, row in enumerate(rows): + assert isinstance(row, EvaluationRow), f"Row {i} should be EvaluationRow" + assert isinstance(row.messages, list), f"Row {i} should have messages list" + assert len(row.messages) >= 2, f"Row {i} should have system + user messages" + + # Check message content + system_msg = row.messages[0] + user_msg = row.messages[1] + assert system_msg.role == "system", f"Row {i} first message should be system" + assert user_msg.role == "user", f"Row {i} second message should be user" + assert "linguistics expert" in system_msg.content, f"Row {i} should have linguistics system prompt" + assert "term" in user_msg.content, f"Row {i} should ask about the term" + + # Verify ground truth + assert row.ground_truth, f"Row {i} should have ground truth" + assert "frequency" in row.ground_truth, f"Row {i} should mention frequency" + + # Verify metadata + assert row.input_metadata, f"Row {i} should have metadata" + dataset_info = row.input_metadata.dataset_info + assert dataset_info["dataset"] == "google_books_ngrams_chi_sim", f"Row {i} should have correct dataset" + assert "term" in dataset_info, f"Row {i} should have term in metadata" + assert "term_frequency" in dataset_info, f"Row {i} should have frequency in metadata" + assert "num_tokens" in dataset_info, f"Row {i} should have token count in metadata" + + # Check repeated fields handling + term = dataset_info["term"] + term_freq = dataset_info["term_frequency"] + doc_freq = dataset_info["document_frequency"] + num_tokens = dataset_info["num_tokens"] + + print(f" Row {i}: Term='{term}', Frequency={term_freq}, Docs={doc_freq}, Tokens={num_tokens}") + + # Verify filtering worked (should have high frequency terms) + assert term_freq > 100, f"Row {i} should have term frequency > 100" + assert doc_freq > 5, f"Row {i} should have document frequency > 5" + + def test_adapters_integration(): """Test that adapters work with evaluation pipeline.""" print("Testing adapter integration with evaluation pipeline...") - + # This test doesn't require external credentials try: from eval_protocol.adapters.huggingface import create_huggingface_adapter from eval_protocol.rewards.accuracy import accuracy_reward except ImportError as e: pytest.skip(f"Dependencies not available: {e}") - + def simple_transform(row: Dict[str, Any]) -> Dict[str, Any]: """Simple transform for testing.""" return { - 'messages': [ - {'role': 'user', 'content': row['question']}, - {'role': 'assistant', 'content': 'Test response'}, # Simulated response + "messages": [ + {"role": "user", "content": row["question"]}, + {"role": "assistant", "content": "Test response"}, # Simulated response ], - 'ground_truth': row['answer'], - 'metadata': {'test': True} + "ground_truth": row["answer"], + "metadata": {"test": True}, } - + # Create adapter with GSM8K (small sample) adapter = create_huggingface_adapter( dataset_id="gsm8k", - config_name="main", + config_name="main", transform_fn=simple_transform, ) - + # Get one row rows = list(adapter.get_evaluation_rows(split="test", limit=1)) assert len(rows) == 1, "Should get exactly one row" - + row = rows[0] - + # Test evaluation result = accuracy_reward( messages=row.messages, ground_truth=row.ground_truth, ) - - assert hasattr(result, 'score'), "Should have evaluation score" + + assert hasattr(result, "score"), "Should have evaluation score" assert 0 <= result.score <= 1, "Score should be between 0 and 1" - + print(f"Integration test successful: Score={result.score}") if __name__ == "__main__": # Run tests manually for development import sys - + print("Running Langfuse E2E tests...") if all([os.getenv("LANGFUSE_PUBLIC_KEY"), os.getenv("LANGFUSE_SECRET_KEY")]): try: @@ -415,20 +725,20 @@ def simple_transform(row: Dict[str, Any]) -> Dict[str, Any]: print(" This is expected if Langfuse API has changed - the adapter needs updating") else: print("⚠️ Skipping Langfuse tests (credentials not available)") - + print("\nRunning HuggingFace E2E tests...") try: test_hf = TestHuggingFaceAdapterE2E() test_hf.test_gsm8k_adapter_real_data() print("✅ GSM8K adapter test passed!") - + # Skip MATH dataset test for now (dataset may not be available) try: test_hf.test_math_dataset_real_data() print("✅ MATH dataset test passed!") except Exception as e: print(f"⚠️ MATH dataset test failed (dataset may not be available): {e}") - + # Skip SQuAD test for now (focus on core functionality) try: test_hf.test_custom_dataset_transform() @@ -439,9 +749,15 @@ def simple_transform(row: Dict[str, Any]) -> Dict[str, Any]: except Exception as e: print(f"❌ HuggingFace tests failed: {e}") sys.exit(1) - - print("\nRunning integration test...") - test_adapters_integration() - print("✅ Integration test passed!") - - print("\n🎉 All E2E tests completed successfully!") \ No newline at end of file + + print("\nRunning BigQuery E2E test...") + try: + test_bq = TestBigQueryAdapterE2E() + # Only test the public Google Books ngrams dataset (no auth required) + test_bq.test_bigquery_public_dataset_google_books_ngrams() + print("✅ BigQuery Google Books ngrams test passed!") + + except Exception as e: + print(f"❌ BigQuery test failed: {e}") + + print("\n🎉 BigQuery E2E test completed successfully!") diff --git a/uv.lock b/uv.lock index d439a47a..0c27bafe 100644 --- a/uv.lock +++ b/uv.lock @@ -585,6 +585,15 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/84/c2/80633736cd183ee4a62107413def345f7e6e3c01563dbca1417363cf957e/build-1.2.2.post1-py3-none-any.whl", hash = "sha256:1d61c0887fa860c01971625baae8bdd338e517b836a2f70dd1f7aa3a6b2fc5b5", size = 22950, upload-time = "2024-10-06T17:22:23.299Z" }, ] +[[package]] +name = "cachetools" +version = "5.5.2" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/6c/81/3747dad6b14fa2cf53fcf10548cf5aea6913e96fab41a3c198676f8948a5/cachetools-5.5.2.tar.gz", hash = "sha256:1a661caa9175d26759571b2e19580f9d6393969e5dfca11fdb1f947a23e640d4", size = 28380, upload-time = "2025-02-20T21:01:19.524Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/72/76/20fa66124dbe6be5cafeb312ece67de6b61dd91a0247d1ea13db4ebb33c2/cachetools-5.5.2-py3-none-any.whl", hash = "sha256:d26a22bcc62eb95c3beabd9f1ee5e820d3d2704fe2967cbe350e20c8ffcd3f0a", size = 10080, upload-time = "2025-02-20T21:01:16.647Z" }, +] + [[package]] name = "certifi" version = "2025.7.14" @@ -1158,9 +1167,17 @@ dependencies = [ [package.optional-dependencies] adapters = [ { name = "datasets" }, + { name = "google-auth" }, + { name = "google-auth-oauthlib" }, + { name = "google-cloud-bigquery" }, { name = "langfuse" }, { name = "transformers" }, ] +bigquery = [ + { name = "google-auth" }, + { name = "google-auth-oauthlib" }, + { name = "google-cloud-bigquery" }, +] box2d = [ { name = "gymnasium", extra = ["box2d"] }, { name = "pillow" }, @@ -1248,6 +1265,12 @@ requires-dist = [ { name = "fireworks-ai", marker = "extra == 'fireworks'", specifier = ">=0.19.12" }, { name = "flake8", marker = "extra == 'dev'", specifier = ">=3.9.2" }, { name = "fsspec" }, + { name = "google-auth", marker = "extra == 'adapters'", specifier = ">=2.0.0" }, + { name = "google-auth", marker = "extra == 'bigquery'", specifier = ">=2.0.0" }, + { name = "google-auth-oauthlib", marker = "extra == 'adapters'", specifier = ">=1.0.0" }, + { name = "google-auth-oauthlib", marker = "extra == 'bigquery'", specifier = ">=1.0.0" }, + { name = "google-cloud-bigquery", marker = "extra == 'adapters'", specifier = ">=3.0.0" }, + { name = "google-cloud-bigquery", marker = "extra == 'bigquery'", specifier = ">=3.0.0" }, { name = "gymnasium", specifier = ">=0.29.0" }, { name = "gymnasium", extras = ["box2d"], marker = "extra == 'box2d'", specifier = ">=0.29.0" }, { name = "haikus", marker = "extra == 'dev'", specifier = "==0.3.8" }, @@ -1304,7 +1327,7 @@ requires-dist = [ { name = "websockets", specifier = ">=15.0.1" }, { name = "werkzeug", marker = "extra == 'dev'", specifier = ">=2.0.0" }, ] -provides-extras = ["dev", "trl", "openevals", "fireworks", "box2d", "langfuse", "huggingface", "adapters", "svgbench"] +provides-extras = ["dev", "trl", "openevals", "fireworks", "box2d", "langfuse", "huggingface", "bigquery", "adapters", "svgbench"] [package.metadata.requires-dev] dev = [ @@ -1612,6 +1635,133 @@ http = [ { name = "aiohttp" }, ] +[[package]] +name = "google-api-core" +version = "2.25.1" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "google-auth" }, + { name = "googleapis-common-protos" }, + { name = "proto-plus" }, + { name = "protobuf" }, + { name = "requests" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/dc/21/e9d043e88222317afdbdb567165fdbc3b0aad90064c7e0c9eb0ad9955ad8/google_api_core-2.25.1.tar.gz", hash = "sha256:d2aaa0b13c78c61cb3f4282c464c046e45fbd75755683c9c525e6e8f7ed0a5e8", size = 165443, upload-time = "2025-06-12T20:52:20.439Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/14/4b/ead00905132820b623732b175d66354e9d3e69fcf2a5dcdab780664e7896/google_api_core-2.25.1-py3-none-any.whl", hash = "sha256:8a2a56c1fef82987a524371f99f3bd0143702fecc670c72e600c1cda6bf8dbb7", size = 160807, upload-time = "2025-06-12T20:52:19.334Z" }, +] + +[package.optional-dependencies] +grpc = [ + { name = "grpcio" }, + { name = "grpcio-status" }, +] + +[[package]] +name = "google-auth" +version = "2.40.3" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "cachetools" }, + { name = "pyasn1-modules" }, + { name = "rsa" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/9e/9b/e92ef23b84fa10a64ce4831390b7a4c2e53c0132568d99d4ae61d04c8855/google_auth-2.40.3.tar.gz", hash = "sha256:500c3a29adedeb36ea9cf24b8d10858e152f2412e3ca37829b3fa18e33d63b77", size = 281029, upload-time = "2025-06-04T18:04:57.577Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/17/63/b19553b658a1692443c62bd07e5868adaa0ad746a0751ba62c59568cd45b/google_auth-2.40.3-py2.py3-none-any.whl", hash = "sha256:1370d4593e86213563547f97a92752fc658456fe4514c809544f330fed45a7ca", size = 216137, upload-time = "2025-06-04T18:04:55.573Z" }, +] + +[[package]] +name = "google-auth-oauthlib" +version = "1.2.2" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "google-auth" }, + { name = "requests-oauthlib" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/fb/87/e10bf24f7bcffc1421b84d6f9c3377c30ec305d082cd737ddaa6d8f77f7c/google_auth_oauthlib-1.2.2.tar.gz", hash = "sha256:11046fb8d3348b296302dd939ace8af0a724042e8029c1b872d87fabc9f41684", size = 20955, upload-time = "2025-04-22T16:40:29.172Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/ac/84/40ee070be95771acd2f4418981edb834979424565c3eec3cd88b6aa09d24/google_auth_oauthlib-1.2.2-py3-none-any.whl", hash = "sha256:fd619506f4b3908b5df17b65f39ca8d66ea56986e5472eb5978fd8f3786f00a2", size = 19072, upload-time = "2025-04-22T16:40:28.174Z" }, +] + +[[package]] +name = "google-cloud-bigquery" +version = "3.35.1" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "google-api-core", extra = ["grpc"] }, + { name = "google-auth" }, + { name = "google-cloud-core" }, + { name = "google-resumable-media" }, + { name = "packaging" }, + { name = "python-dateutil" }, + { name = "requests" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/44/e4/9cf03fa81fefd1b9811a7cd6e398804ae0de3b6a4edef810e2acd45cabbc/google_cloud_bigquery-3.35.1.tar.gz", hash = "sha256:599f26cacf190acfe88000f6cc5f4bc9e6baac7899e4f406ca054f1906f71960", size = 496433, upload-time = "2025-07-24T15:09:04.108Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/63/50/96fe9bc5b83d3a421e91ed8edc2535de45957e9af398273e3ecb5c3a1094/google_cloud_bigquery-3.35.1-py3-none-any.whl", hash = "sha256:6739a6ba63c6d80735ca2b34b1df2090ff473b80c1a62354caa2debe6dbbd961", size = 256877, upload-time = "2025-07-24T15:09:02.443Z" }, +] + +[[package]] +name = "google-cloud-core" +version = "2.4.3" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "google-api-core" }, + { name = "google-auth" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/d6/b8/2b53838d2acd6ec6168fd284a990c76695e84c65deee79c9f3a4276f6b4f/google_cloud_core-2.4.3.tar.gz", hash = "sha256:1fab62d7102844b278fe6dead3af32408b1df3eb06f5c7e8634cbd40edc4da53", size = 35861, upload-time = "2025-03-10T21:05:38.948Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/40/86/bda7241a8da2d28a754aad2ba0f6776e35b67e37c36ae0c45d49370f1014/google_cloud_core-2.4.3-py2.py3-none-any.whl", hash = "sha256:5130f9f4c14b4fafdff75c79448f9495cfade0d8775facf1b09c3bf67e027f6e", size = 29348, upload-time = "2025-03-10T21:05:37.785Z" }, +] + +[[package]] +name = "google-crc32c" +version = "1.7.1" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/19/ae/87802e6d9f9d69adfaedfcfd599266bf386a54d0be058b532d04c794f76d/google_crc32c-1.7.1.tar.gz", hash = "sha256:2bff2305f98846f3e825dbeec9ee406f89da7962accdb29356e4eadc251bd472", size = 14495, upload-time = "2025-03-26T14:29:13.32Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/eb/69/b1b05cf415df0d86691d6a8b4b7e60ab3a6fb6efb783ee5cd3ed1382bfd3/google_crc32c-1.7.1-cp310-cp310-macosx_12_0_arm64.whl", hash = "sha256:b07d48faf8292b4db7c3d64ab86f950c2e94e93a11fd47271c28ba458e4a0d76", size = 30467, upload-time = "2025-03-26T14:31:11.92Z" }, + { url = "https://files.pythonhosted.org/packages/44/3d/92f8928ecd671bd5b071756596971c79d252d09b835cdca5a44177fa87aa/google_crc32c-1.7.1-cp310-cp310-macosx_12_0_x86_64.whl", hash = "sha256:7cc81b3a2fbd932a4313eb53cc7d9dde424088ca3a0337160f35d91826880c1d", size = 30311, upload-time = "2025-03-26T14:53:14.161Z" }, + { url = "https://files.pythonhosted.org/packages/33/42/c2d15a73df79d45ed6b430b9e801d0bd8e28ac139a9012d7d58af50a385d/google_crc32c-1.7.1-cp310-cp310-manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:1c67ca0a1f5b56162951a9dae987988679a7db682d6f97ce0f6381ebf0fbea4c", size = 37889, upload-time = "2025-03-26T14:41:27.83Z" }, + { url = "https://files.pythonhosted.org/packages/57/ea/ac59c86a3c694afd117bb669bde32aaf17d0de4305d01d706495f09cbf19/google_crc32c-1.7.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:fc5319db92daa516b653600794d5b9f9439a9a121f3e162f94b0e1891c7933cb", size = 33028, upload-time = "2025-03-26T14:41:29.141Z" }, + { url = "https://files.pythonhosted.org/packages/60/44/87e77e8476767a4a93f6cf271157c6d948eacec63688c093580af13b04be/google_crc32c-1.7.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:dcdf5a64adb747610140572ed18d011896e3b9ae5195f2514b7ff678c80f1603", size = 38026, upload-time = "2025-03-26T14:41:29.921Z" }, + { url = "https://files.pythonhosted.org/packages/c8/bf/21ac7bb305cd7c1a6de9c52f71db0868e104a5b573a4977cd9d0ff830f82/google_crc32c-1.7.1-cp310-cp310-win_amd64.whl", hash = "sha256:754561c6c66e89d55754106739e22fdaa93fafa8da7221b29c8b8e8270c6ec8a", size = 33476, upload-time = "2025-03-26T14:29:09.086Z" }, + { url = "https://files.pythonhosted.org/packages/f7/94/220139ea87822b6fdfdab4fb9ba81b3fff7ea2c82e2af34adc726085bffc/google_crc32c-1.7.1-cp311-cp311-macosx_12_0_arm64.whl", hash = "sha256:6fbab4b935989e2c3610371963ba1b86afb09537fd0c633049be82afe153ac06", size = 30468, upload-time = "2025-03-26T14:32:52.215Z" }, + { url = "https://files.pythonhosted.org/packages/94/97/789b23bdeeb9d15dc2904660463ad539d0318286d7633fe2760c10ed0c1c/google_crc32c-1.7.1-cp311-cp311-macosx_12_0_x86_64.whl", hash = "sha256:ed66cbe1ed9cbaaad9392b5259b3eba4a9e565420d734e6238813c428c3336c9", size = 30313, upload-time = "2025-03-26T14:57:38.758Z" }, + { url = "https://files.pythonhosted.org/packages/81/b8/976a2b843610c211e7ccb3e248996a61e87dbb2c09b1499847e295080aec/google_crc32c-1.7.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ee6547b657621b6cbed3562ea7826c3e11cab01cd33b74e1f677690652883e77", size = 33048, upload-time = "2025-03-26T14:41:30.679Z" }, + { url = "https://files.pythonhosted.org/packages/c9/16/a3842c2cf591093b111d4a5e2bfb478ac6692d02f1b386d2a33283a19dc9/google_crc32c-1.7.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d68e17bad8f7dd9a49181a1f5a8f4b251c6dbc8cc96fb79f1d321dfd57d66f53", size = 32669, upload-time = "2025-03-26T14:41:31.432Z" }, + { url = "https://files.pythonhosted.org/packages/04/17/ed9aba495916fcf5fe4ecb2267ceb851fc5f273c4e4625ae453350cfd564/google_crc32c-1.7.1-cp311-cp311-win_amd64.whl", hash = "sha256:6335de12921f06e1f774d0dd1fbea6bf610abe0887a1638f64d694013138be5d", size = 33476, upload-time = "2025-03-26T14:29:10.211Z" }, + { url = "https://files.pythonhosted.org/packages/dd/b7/787e2453cf8639c94b3d06c9d61f512234a82e1d12d13d18584bd3049904/google_crc32c-1.7.1-cp312-cp312-macosx_12_0_arm64.whl", hash = "sha256:2d73a68a653c57281401871dd4aeebbb6af3191dcac751a76ce430df4d403194", size = 30470, upload-time = "2025-03-26T14:34:31.655Z" }, + { url = "https://files.pythonhosted.org/packages/ed/b4/6042c2b0cbac3ec3a69bb4c49b28d2f517b7a0f4a0232603c42c58e22b44/google_crc32c-1.7.1-cp312-cp312-macosx_12_0_x86_64.whl", hash = "sha256:22beacf83baaf59f9d3ab2bbb4db0fb018da8e5aebdce07ef9f09fce8220285e", size = 30315, upload-time = "2025-03-26T15:01:54.634Z" }, + { url = "https://files.pythonhosted.org/packages/29/ad/01e7a61a5d059bc57b702d9ff6a18b2585ad97f720bd0a0dbe215df1ab0e/google_crc32c-1.7.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:19eafa0e4af11b0a4eb3974483d55d2d77ad1911e6cf6f832e1574f6781fd337", size = 33180, upload-time = "2025-03-26T14:41:32.168Z" }, + { url = "https://files.pythonhosted.org/packages/3b/a5/7279055cf004561894ed3a7bfdf5bf90a53f28fadd01af7cd166e88ddf16/google_crc32c-1.7.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b6d86616faaea68101195c6bdc40c494e4d76f41e07a37ffdef270879c15fb65", size = 32794, upload-time = "2025-03-26T14:41:33.264Z" }, + { url = "https://files.pythonhosted.org/packages/0f/d6/77060dbd140c624e42ae3ece3df53b9d811000729a5c821b9fd671ceaac6/google_crc32c-1.7.1-cp312-cp312-win_amd64.whl", hash = "sha256:b7491bdc0c7564fcf48c0179d2048ab2f7c7ba36b84ccd3a3e1c3f7a72d3bba6", size = 33477, upload-time = "2025-03-26T14:29:10.94Z" }, + { url = "https://files.pythonhosted.org/packages/8b/72/b8d785e9184ba6297a8620c8a37cf6e39b81a8ca01bb0796d7cbb28b3386/google_crc32c-1.7.1-cp313-cp313-macosx_12_0_arm64.whl", hash = "sha256:df8b38bdaf1629d62d51be8bdd04888f37c451564c2042d36e5812da9eff3c35", size = 30467, upload-time = "2025-03-26T14:36:06.909Z" }, + { url = "https://files.pythonhosted.org/packages/34/25/5f18076968212067c4e8ea95bf3b69669f9fc698476e5f5eb97d5b37999f/google_crc32c-1.7.1-cp313-cp313-macosx_12_0_x86_64.whl", hash = "sha256:e42e20a83a29aa2709a0cf271c7f8aefaa23b7ab52e53b322585297bb94d4638", size = 30309, upload-time = "2025-03-26T15:06:15.318Z" }, + { url = "https://files.pythonhosted.org/packages/92/83/9228fe65bf70e93e419f38bdf6c5ca5083fc6d32886ee79b450ceefd1dbd/google_crc32c-1.7.1-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:905a385140bf492ac300026717af339790921f411c0dfd9aa5a9e69a08ed32eb", size = 33133, upload-time = "2025-03-26T14:41:34.388Z" }, + { url = "https://files.pythonhosted.org/packages/c3/ca/1ea2fd13ff9f8955b85e7956872fdb7050c4ace8a2306a6d177edb9cf7fe/google_crc32c-1.7.1-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6b211ddaf20f7ebeec5c333448582c224a7c90a9d98826fbab82c0ddc11348e6", size = 32773, upload-time = "2025-03-26T14:41:35.19Z" }, + { url = "https://files.pythonhosted.org/packages/89/32/a22a281806e3ef21b72db16f948cad22ec68e4bdd384139291e00ff82fe2/google_crc32c-1.7.1-cp313-cp313-win_amd64.whl", hash = "sha256:0f99eaa09a9a7e642a61e06742856eec8b19fc0037832e03f941fe7cf0c8e4db", size = 33475, upload-time = "2025-03-26T14:29:11.771Z" }, + { url = "https://files.pythonhosted.org/packages/b8/c5/002975aff514e57fc084ba155697a049b3f9b52225ec3bc0f542871dd524/google_crc32c-1.7.1-cp313-cp313t-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:32d1da0d74ec5634a05f53ef7df18fc646666a25efaaca9fc7dcfd4caf1d98c3", size = 33243, upload-time = "2025-03-26T14:41:35.975Z" }, + { url = "https://files.pythonhosted.org/packages/61/cb/c585282a03a0cea70fcaa1bf55d5d702d0f2351094d663ec3be1c6c67c52/google_crc32c-1.7.1-cp313-cp313t-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e10554d4abc5238823112c2ad7e4560f96c7bf3820b202660373d769d9e6e4c9", size = 32870, upload-time = "2025-03-26T14:41:37.08Z" }, + { url = "https://files.pythonhosted.org/packages/0b/43/31e57ce04530794917dfe25243860ec141de9fadf4aa9783dffe7dac7c39/google_crc32c-1.7.1-pp310-pypy310_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a8e9afc74168b0b2232fb32dd202c93e46b7d5e4bf03e66ba5dc273bb3559589", size = 28242, upload-time = "2025-03-26T14:41:42.858Z" }, + { url = "https://files.pythonhosted.org/packages/eb/f3/8b84cd4e0ad111e63e30eb89453f8dd308e3ad36f42305cf8c202461cdf0/google_crc32c-1.7.1-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:fa8136cc14dd27f34a3221c0f16fd42d8a40e4778273e61a3c19aedaa44daf6b", size = 28049, upload-time = "2025-03-26T14:41:44.651Z" }, + { url = "https://files.pythonhosted.org/packages/16/1b/1693372bf423ada422f80fd88260dbfd140754adb15cbc4d7e9a68b1cb8e/google_crc32c-1.7.1-pp311-pypy311_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:85fef7fae11494e747c9fd1359a527e5970fc9603c90764843caabd3a16a0a48", size = 28241, upload-time = "2025-03-26T14:41:45.898Z" }, + { url = "https://files.pythonhosted.org/packages/fd/3c/2a19a60a473de48717b4efb19398c3f914795b64a96cf3fbe82588044f78/google_crc32c-1.7.1-pp311-pypy311_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6efb97eb4369d52593ad6f75e7e10d053cf00c48983f7a973105bc70b0ac4d82", size = 28048, upload-time = "2025-03-26T14:41:46.696Z" }, +] + +[[package]] +name = "google-resumable-media" +version = "2.7.2" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "google-crc32c" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/58/5a/0efdc02665dca14e0837b62c8a1a93132c264bd02054a15abb2218afe0ae/google_resumable_media-2.7.2.tar.gz", hash = "sha256:5280aed4629f2b60b847b0d42f9857fd4935c11af266744df33d8074cae92fe0", size = 2163099, upload-time = "2024-08-07T22:20:38.555Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/82/35/b8d3baf8c46695858cb9d8835a53baa1eeb9906ddaf2f728a5f5b640fd1e/google_resumable_media-2.7.2-py2.py3-none-any.whl", hash = "sha256:3ce7551e9fe6d99e9a126101d2536612bb73486721951e9562fee0f90c6ababa", size = 81251, upload-time = "2024-08-07T22:20:36.409Z" }, +] + [[package]] name = "googleapis-common-protos" version = "1.70.0" @@ -1723,6 +1873,20 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/34/80/de3eb55eb581815342d097214bed4c59e806b05f1b3110df03b2280d6dfd/grpcio-1.74.0-cp313-cp313-win_amd64.whl", hash = "sha256:fd3c71aeee838299c5887230b8a1822795325ddfea635edd82954c1eaa831e24", size = 4489214, upload-time = "2025-07-24T18:53:59.771Z" }, ] +[[package]] +name = "grpcio-status" +version = "1.71.2" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "googleapis-common-protos" }, + { name = "grpcio" }, + { name = "protobuf" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/fd/d1/b6e9877fedae3add1afdeae1f89d1927d296da9cf977eca0eb08fb8a460e/grpcio_status-1.71.2.tar.gz", hash = "sha256:c7a97e176df71cdc2c179cd1847d7fc86cca5832ad12e9798d7fed6b7a1aab50", size = 13677, upload-time = "2025-06-28T04:24:05.426Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/67/58/317b0134129b556a93a3b0afe00ee675b5657f0155509e22fcb853bafe2d/grpcio_status-1.71.2-py3-none-any.whl", hash = "sha256:803c98cb6a8b7dc6dbb785b1111aed739f241ab5e9da0bba96888aa74704cfd3", size = 14424, upload-time = "2025-06-28T04:23:42.136Z" }, +] + [[package]] name = "grpclib" version = "0.4.8" @@ -3555,6 +3719,15 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/9e/4e/0d0c945463719429b7bd21dece907ad0bde437a2ff12b9b12fee94722ab0/nvidia_nvtx_cu12-12.6.77-py3-none-manylinux2014_x86_64.whl", hash = "sha256:6574241a3ec5fdc9334353ab8c479fe75841dbe8f4532a8fc97ce63503330ba1", size = 89265, upload-time = "2024-10-01T17:00:38.172Z" }, ] +[[package]] +name = "oauthlib" +version = "3.3.1" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/0b/5f/19930f824ffeb0ad4372da4812c50edbd1434f678c90c2733e1188edfc63/oauthlib-3.3.1.tar.gz", hash = "sha256:0f0f8aa759826a193cf66c12ea1af1637f87b9b4622d46e866952bb022e538c9", size = 185918, upload-time = "2025-06-19T22:48:08.269Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/be/9c/92789c596b8df838baa98fa71844d84283302f7604ed565dafe5a6b5041a/oauthlib-3.3.1-py3-none-any.whl", hash = "sha256:88119c938d2b8fb88561af5f6ee0eec8cc8d552b7bb1f712743136eb7523b7a1", size = 160065, upload-time = "2025-06-19T22:48:06.508Z" }, +] + [[package]] name = "omegaconf" version = "2.3.0" @@ -4203,6 +4376,18 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/cc/35/cc0aaecf278bb4575b8555f2b137de5ab821595ddae9da9d3cd1da4072c7/propcache-0.3.2-py3-none-any.whl", hash = "sha256:98f1ec44fb675f5052cccc8e609c46ed23a35a1cfd18545ad4e29002d858a43f", size = 12663, upload-time = "2025-06-09T22:56:04.484Z" }, ] +[[package]] +name = "proto-plus" +version = "1.26.1" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "protobuf" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/f4/ac/87285f15f7cce6d4a008f33f1757fb5a13611ea8914eb58c3d0d26243468/proto_plus-1.26.1.tar.gz", hash = "sha256:21a515a4c4c0088a773899e23c7bbade3d18f9c66c73edd4c7ee3816bc96a012", size = 56142, upload-time = "2025-03-10T15:54:38.843Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/4e/6d/280c4c2ce28b1593a19ad5239c8b826871fc6ec275c21afc8e1820108039/proto_plus-1.26.1-py3-none-any.whl", hash = "sha256:13285478c2dcf2abb829db158e1047e2f1e8d63a077d94263c2b88b043c75a66", size = 50163, upload-time = "2025-03-10T15:54:37.335Z" }, +] + [[package]] name = "protobuf" version = "5.29.3" @@ -4293,6 +4478,27 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/e5/4e/519c1bc1876625fe6b71e9a28287c43ec2f20f73c658b9ae1d485c0c206e/pyarrow-21.0.0-cp313-cp313t-win_amd64.whl", hash = "sha256:222c39e2c70113543982c6b34f3077962b44fca38c0bd9e68bb6781534425c10", size = 26371006, upload-time = "2025-07-18T00:56:56.379Z" }, ] +[[package]] +name = "pyasn1" +version = "0.6.1" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/ba/e9/01f1a64245b89f039897cb0130016d79f77d52669aae6ee7b159a6c4c018/pyasn1-0.6.1.tar.gz", hash = "sha256:6f580d2bdd84365380830acf45550f2511469f673cb4a5ae3857a3170128b034", size = 145322, upload-time = "2024-09-10T22:41:42.55Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/c8/f1/d6a797abb14f6283c0ddff96bbdd46937f64122b8c925cab503dd37f8214/pyasn1-0.6.1-py3-none-any.whl", hash = "sha256:0d632f46f2ba09143da3a8afe9e33fb6f92fa2320ab7e886e2d0f7672af84629", size = 83135, upload-time = "2024-09-11T16:00:36.122Z" }, +] + +[[package]] +name = "pyasn1-modules" +version = "0.4.2" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "pyasn1" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/e9/e6/78ebbb10a8c8e4b61a59249394a4a594c1a7af95593dc933a349c8d00964/pyasn1_modules-0.4.2.tar.gz", hash = "sha256:677091de870a80aae844b1ca6134f54652fa2c8c5a52aa396440ac3106e941e6", size = 307892, upload-time = "2025-03-28T02:41:22.17Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/47/8d/d529b5d697919ba8c11ad626e835d4039be708a35b0d22de83a269a6682c/pyasn1_modules-0.4.2-py3-none-any.whl", hash = "sha256:29253a9207ce32b64c3ac6600edc75368f98473906e8fd1043bd6b5b1de2c14a", size = 181259, upload-time = "2025-03-28T02:41:19.028Z" }, +] + [[package]] name = "pycares" version = "4.9.0" @@ -4972,6 +5178,19 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/7c/e4/56027c4a6b4ae70ca9de302488c5ca95ad4a39e190093d6c1a8ace08341b/requests-2.32.4-py3-none-any.whl", hash = "sha256:27babd3cda2a6d50b30443204ee89830707d396671944c998b5975b031ac2b2c", size = 64847, upload-time = "2025-06-09T16:43:05.728Z" }, ] +[[package]] +name = "requests-oauthlib" +version = "2.0.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "oauthlib" }, + { name = "requests" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/42/f2/05f29bc3913aea15eb670be136045bf5c5bbf4b99ecb839da9b422bb2c85/requests-oauthlib-2.0.0.tar.gz", hash = "sha256:b3dffaebd884d8cd778494369603a9e7b58d29111bf6b41bdc2dcd87203af4e9", size = 55650, upload-time = "2024-03-22T20:32:29.939Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/3b/5d/63d4ae3b9daea098d5d6f5da83984853c1bbacd5dc826764b249fe119d24/requests_oauthlib-2.0.0-py2.py3-none-any.whl", hash = "sha256:7dd8a5c40426b779b0868c404bdef9768deccf22749cde15852df527e6269b36", size = 24179, upload-time = "2024-03-22T20:32:28.055Z" }, +] + [[package]] name = "requests-toolbelt" version = "1.0.0" @@ -5316,6 +5535,18 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/c8/ed/9de62c2150ca8e2e5858acf3f4f4d0d180a38feef9fdab4078bea63d8dba/rpds_py-0.26.0-pp311-pypy311_pp73-musllinux_1_2_x86_64.whl", hash = "sha256:e99685fc95d386da368013e7fb4269dd39c30d99f812a8372d62f244f662709c", size = 555334, upload-time = "2025-07-01T15:56:51.703Z" }, ] +[[package]] +name = "rsa" +version = "4.9.1" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "pyasn1" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/da/8a/22b7beea3ee0d44b1916c0c1cb0ee3af23b700b6da9f04991899d0c555d4/rsa-4.9.1.tar.gz", hash = "sha256:e7bdbfdb5497da4c07dfd35530e1a902659db6ff241e39d9953cad06ebd0ae75", size = 29034, upload-time = "2025-04-16T09:51:18.218Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/64/8d/0133e4eb4beed9e425d9a98ed6e081a55d195481b7632472be1af08d2f6b/rsa-4.9.1-py3-none-any.whl", hash = "sha256:68635866661c6836b8d39430f97a996acbd61bfa49406748ea243539fe239762", size = 34696, upload-time = "2025-04-16T09:51:17.142Z" }, +] + [[package]] name = "ruff" version = "0.9.10"