diff --git a/src/trustshell/__init__.py b/src/trustshell/__init__.py
index 66283db..d926c92 100644
--- a/src/trustshell/__init__.py
+++ b/src/trustshell/__init__.py
@@ -3,9 +3,8 @@
 import logging
 import os
 import sys
-import asyncio
 from urllib.parse import urlparse, urlunparse, quote, parse_qs
-from typing import Optional, Any, cast
+from typing import Optional, Any
 
 import httpx
 import jwt
@@ -309,7 +308,7 @@ def launch_browser(code_challenge: str, state: str) -> None:
     webbrowser.open(url)
 
 
-async def paginated_trustify_query_async(
+def paginated_trustify_query(
     endpoint: str,
     base_params: dict[str, Any],
     auth_header: dict[str, str],
@@ -317,7 +316,7 @@
     limit: int = 100,
 ) -> dict[str, Any]:
     """
-    Perform a paginated query to a Trustify API endpoint using parallel requests.
+    Perform a paginated query to a Trustify API endpoint using sequential requests.
 
     Args:
         endpoint: The API endpoint URL
@@ -330,12 +329,12 @@
         dict with 'items' and 'total' keys containing all paginated results
     """
 
-    async def make_request_with_retry(
-        client: httpx.AsyncClient, query_params: dict[str, Any], headers: dict[str, str]
+    def make_request_with_retry(
+        client: httpx.Client, query_params: dict[str, Any], headers: dict[str, str]
     ) -> httpx.Response:
-        """Make async HTTP request with 401 retry logic"""
+        """Make HTTP request with 401 retry logic"""
         try:
-            response = await client.get(
+            response = client.get(
                 endpoint, params=query_params, headers=headers, timeout=2400
             )
             response.raise_for_status()
@@ -347,19 +346,17 @@
                 new_access_token = get_access_token()
                 if new_access_token:
                     headers["Authorization"] = f"Bearer {new_access_token}"
-                    response = await client.get(
+                    response = client.get(
                         endpoint, params=query_params, headers=headers, timeout=300
                     )
                     response.raise_for_status()
                     return response
             raise
 
-    async with httpx.AsyncClient() as client:
+    with httpx.Client() as client:
         # First request to get total count
         query_params = {**base_params, "limit": limit, "offset": 0}
-        first_response = await make_request_with_retry(
-            client, query_params, auth_header
-        )
+        first_response = make_request_with_retry(client, query_params, auth_header)
         first_result = first_response.json()
 
         total_available = first_result.get("total", 0)
@@ -370,51 +367,19 @@
 
         all_items = first_result.get("items", [])
 
-        # Calculate remaining pages needed
-        remaining_items = total_available - len(all_items)
-        if remaining_items <= 0:
-            # All items fit in first page
-            if component_name:
-                console.print(
-                    f"Retrieved {len(all_items)} items out of {total_available} total for {component_name}"
-                )
-            return {"items": all_items, "total": total_available}
-
-        # Calculate offsets for remaining pages
-        remaining_pages = []
+        # Fetch remaining pages sequentially
         offset = limit
         while offset < total_available:
-            remaining_pages.append(offset)
-            offset += limit
-
-        # Make parallel requests for remaining pages
-        async def fetch_page(page_offset: int) -> list[Any]:
-            """Fetch a single page of results"""
-            page_params = {**base_params, "limit": limit, "offset": page_offset}
+            page_params = {**base_params, "limit": limit, "offset": offset}
             try:
-                response = await make_request_with_retry(
-                    client, page_params, auth_header
-                )
+                response = make_request_with_retry(client, page_params, auth_header)
                 result = response.json()
-                return cast(list[Any], result.get("items", []))
+                page_items = result.get("items", [])
+                all_items.extend(page_items)
+                offset += limit
            except Exception as e:
-                logger.error(f"Error fetching page at offset {page_offset}: {e}")
-                return []
-
-        # Execute all remaining page requests in parallel
-        if remaining_pages:
-            page_results = await asyncio.gather(
-                *[fetch_page(offset) for offset in remaining_pages],
-                return_exceptions=True,
-            )
-
-            # Combine results from all pages
-            for page_items in page_results:
-                if isinstance(page_items, list):
-                    all_items.extend(page_items)
-                else:
-                    # Handle exceptions from gather
-                    logger.error(f"Error in parallel page fetch: {page_items}")
+                logger.error(f"Error fetching page at offset {offset}: {e}")
+                break
 
         if component_name:
             console.print(
@@ -424,36 +389,6 @@ async def fetch_page(page_offset: int) -> list[Any]:
     return {"items": all_items, "total": total_available}
 
 
-def paginated_trustify_query(
-    endpoint: str,
-    base_params: dict[str, Any],
-    auth_header: dict[str, str],
-    component_name: str = "",
-    limit: int = 100,
-) -> dict[str, Any]:
-    """
-    Perform a paginated query to a Trustify API endpoint.
-
-    This is a synchronous wrapper around the async implementation that uses
-    parallel requests for better performance.
-
-    Args:
-        endpoint: The API endpoint URL
-        base_params: Base query parameters (will add limit/offset)
-        auth_header: Authentication headers
-        component_name: Component name for progress messages (optional)
-        limit: Number of items per request
-
-    Returns:
-        dict with 'items' and 'total' keys containing all paginated results
-    """
-    return asyncio.run(
-        paginated_trustify_query_async(
-            endpoint, base_params, auth_header, component_name, limit
-        )
-    )
-
-
 def render_tree(root: Node) -> None:
     """Pretty print a tree using name only"""
     for pre, _, node in RenderTree(root):
diff --git a/src/trustshell/products.py b/src/trustshell/products.py
index f76b91f..a993adb 100644
--- a/src/trustshell/products.py
+++ b/src/trustshell/products.py
@@ -96,7 +96,7 @@ def prime_cache(check: bool, debug: bool) -> None:
 @click.option(
     "--versions", "-v", is_flag=True, default=False, help="Show PURL versions."
 )
-@click.option("--latest", "-l", is_flag=True, default=False)
+@click.option("--latest", "-l", is_flag=True, default=True)
 @click.option("--cpes", "-c", is_flag=True, default=False)
 @click.option(
     "--include-rpm-containers",
diff --git a/uv.lock b/uv.lock
index 0fc949c..4264049 100644
--- a/uv.lock
+++ b/uv.lock
@@ -712,7 +712,7 @@ wheels = [
 
 [[package]]
 name = "trustshell"
-version = "0.2.1"
+version = "0.2.2"
 source = { editable = "." }
 dependencies = [
     { name = "anytree" },