+import time
 import uuid
 from collections.abc import Iterator
 from dataclasses import asdict, dataclass
-from datetime import datetime
+from datetime import datetime, timedelta
 from typing import Optional

 from rich.text import Text

-from dstack._internal.cli.utils.common import console
+from dstack._internal.cli.utils.common import LIVE_TABLE_PROVISION_INTERVAL_SECS, console
 from dstack._internal.core.models.events import Event, EventTargetType
 from dstack._internal.server.schemas.events import LIST_EVENTS_DEFAULT_LIMIT
 from dstack.api.server._events import EventsAPIClient
@@ -50,6 +51,82 @@ def list( |
         prev_recorded_at = events[-1].recorded_at


+class EventTracker:
+    """
+    Tracks new events from the server. Implements a sliding-window mechanism to avoid
+    missing events that are committed with a delay.
+    """
+
+    def __init__(
+        self,
+        client: EventsAPIClient,
+        filters: EventListFilters,
+        since: Optional[datetime],
+        event_delay_tolerance: timedelta = timedelta(seconds=20),
+    ) -> None:
+        self._client = client
+        self._filters = filters
+        self._since = since
+        self._event_delay_tolerance = event_delay_tolerance
+        self._seen_events: dict[uuid.UUID, _SeenEvent] = {}
+        self._latest_event: Optional[Event] = None
+
+    def poll(self) -> Iterator[Event]:
+        """
+        Fetches the next batch of events from the server.
+        """
+
+        if self._since is None and self._latest_event is None:
+            # First batch without `since` - fetch some recent events
+            event_stream = reversed(self._client.list(ascending=False, **asdict(self._filters)))
+        else:
+            configured_since = self._since or datetime.fromtimestamp(0)
+            latest_event_recorded_at = (
+                self._latest_event.recorded_at
+                if self._latest_event is not None
+                else datetime.fromtimestamp(0)
+            )
+            since = max(
+                configured_since.astimezone(),
+                latest_event_recorded_at.astimezone() - self._event_delay_tolerance,
+            )
+            self._cleanup_seen_events(before=since)
+            event_stream = EventPaginator(self._client).list(self._filters, since, ascending=True)
+
+        for event in event_stream:
+            if event.id not in self._seen_events:
+                self._seen_events[event.id] = _SeenEvent(recorded_at=event.recorded_at)
+                yield event
+            self._latest_event = event
+
+    def stream_forever(
+        self,
+        update_interval: timedelta = timedelta(seconds=LIVE_TABLE_PROVISION_INTERVAL_SECS),
+    ) -> Iterator[Event]:
+        """
+        Yields events as they are received from the server.
+        """
+
+        while True:
+            for event in self.poll():
+                yield event
+            time.sleep(update_interval.total_seconds())
+
+    def _cleanup_seen_events(self, before: datetime) -> None:
+        ids_to_delete = {
+            event_id
+            for event_id, seen_event in self._seen_events.items()
+            if seen_event.recorded_at.astimezone() < before.astimezone()
+        }
+        for event_id in ids_to_delete:
+            del self._seen_events[event_id]
+
+
+@dataclass
+class _SeenEvent:
+    recorded_at: datetime
+
+
 def print_event(event: Event) -> None:
     recorded_at = event.recorded_at.astimezone().strftime("%Y-%m-%d %H:%M:%S")
     targets = ", ".join(f"{target.type} {target.name}" for target in event.targets)
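
A note on the sliding window in `poll`: each call re-requests a short tail of already-fetched history (`event_delay_tolerance`, 20 seconds by default) behind the newest event received so far, so an event committed late but stamped with an earlier `recorded_at` is still picked up by a later poll, and `_seen_events` deduplicates the overlap. A toy walkthrough of the window arithmetic, with invented timestamps:

    from datetime import datetime, timedelta

    # Invented timestamps for illustration only, not taken from the diff.
    tolerance = timedelta(seconds=20)
    latest_recorded_at = datetime(2024, 1, 1, 12, 0, 30).astimezone()

    # The next poll asks the server for events since 12:00:10: it re-reads
    # the last 20 seconds, relying on _seen_events to skip events already
    # yielded while catching any event that was committed late.
    since = latest_recorded_at - tolerance
    assert since == datetime(2024, 1, 1, 12, 0, 10).astimezone()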
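
And a minimal usage sketch, assuming the caller already has an `EventsAPIClient` and that `EventListFilters` can be default-constructed (both assumptions; their definitions sit outside this hunk):

    from datetime import datetime, timedelta

    def follow_events(client: EventsAPIClient) -> None:
        # Hypothetical driver, not part of the diff: follow events recorded
        # in the last 5 minutes plus everything that arrives afterwards.
        tracker = EventTracker(
            client=client,
            filters=EventListFilters(),  # assumed: default filters mean no filtering
            since=datetime.now().astimezone() - timedelta(minutes=5),
        )
        for event in tracker.stream_forever():  # blocks, polling on an interval
            print_event(event)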
|