Skip to content

Commit 5e1932d

Browse files
[Feature]: Add JSON output option for CLI commands #3322 (WIP)
This is a non-backward-compatible change. It refactors the existing `--json` code to make it easy to maintain and to keep it extensible and backward-compatible in the future.
1 parent 41fcee2 commit 5e1932d

4 files changed

Lines changed: 41 additions & 46 deletions

File tree

src/dstack/_internal/cli/commands/offer.py

Lines changed: 7 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -1,6 +1,6 @@
11
import argparse
22
from pathlib import Path
3-
from typing import List
3+
from typing import List, Literal, cast
44

55
from dstack._internal.cli.commands import APIBaseCommand
66
from dstack._internal.cli.services.args import cpu_spec, disk_spec, gpu_spec
@@ -130,7 +130,12 @@ def _command(self, args: argparse.Namespace):
130130
else:
131131
if args.group_by:
132132
gpus = self._list_gpus(args, run_spec)
133-
print_gpu_json(gpus, run_spec, args.group_by, self.api.project)
133+
print_gpu_json(
134+
gpus,
135+
run_spec,
136+
cast(List[Literal["gpu", "backend", "region", "count"]], args.group_by),
137+
self.api.project,
138+
)
134139
else:
135140
run_plan = self.api.client.runs.get_plan(
136141
self.api.project,

src/dstack/_internal/cli/models/__init__.py

Whitespace-only changes.
Lines changed: 19 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,19 @@
1+
from typing import List, Literal
2+
3+
from dstack._internal.core.models.common import CoreConfig, generate_dual_core_model
4+
from dstack._internal.core.models.runs import Requirements
5+
from dstack._internal.server.schemas.gpus import GpuGroup
6+
from dstack._internal.utils.json_utils import pydantic_orjson_dumps_with_indent
7+
8+
9+
class OfferCommandOutputConfig(CoreConfig):
10+
json_dumps = pydantic_orjson_dumps_with_indent
11+
12+
13+
class OfferCommandOutput(generate_dual_core_model(OfferCommandOutputConfig)):
14+
"""JSON output model for `dstack offer` command with GPU grouping."""
15+
16+
project: str
17+
requirements: Requirements
18+
group_by: List[Literal["gpu", "backend", "region", "count"]]
19+
gpus: List[GpuGroup]

src/dstack/_internal/cli/utils/gpu.py

Lines changed: 15 additions & 44 deletions
Original file line numberDiff line numberDiff line change
@@ -1,15 +1,21 @@
11
import shutil
2-
from typing import List
2+
from typing import List, Literal
33

44
from rich.table import Table
55

6+
from dstack._internal.cli.models.offer import OfferCommandOutput
67
from dstack._internal.cli.utils.common import console
78
from dstack._internal.core.models.profiles import SpotPolicy
89
from dstack._internal.core.models.runs import Requirements, RunSpec, get_policy_map
910
from dstack._internal.server.schemas.gpus import GpuGroup
1011

1112

12-
def print_gpu_json(gpus, run_spec, group_by_cli, api_project):
13+
def print_gpu_json(
14+
gpus: List[GpuGroup],
15+
run_spec: RunSpec,
16+
group_by: List[Literal["gpu", "backend", "region", "count"]],
17+
project: str,
18+
):
1319
"""Print GPU information in JSON format."""
1420
req = Requirements(
1521
resources=run_spec.configuration.resources,
@@ -18,49 +24,14 @@ def print_gpu_json(gpus, run_spec, group_by_cli, api_project):
1824
reservation=run_spec.configuration.reservation,
1925
)
2026

21-
if req.spot is None:
22-
spot_policy = "auto"
23-
elif req.spot:
24-
spot_policy = "spot"
25-
else:
26-
spot_policy = "on-demand"
27-
28-
output = {
29-
"project": api_project,
30-
"user": "admin", # TODO: Get actual user name
31-
"resources": req.resources.dict(),
32-
"spot_policy": spot_policy,
33-
"max_price": req.max_price,
34-
"reservation": run_spec.configuration.reservation,
35-
"group_by": group_by_cli,
36-
"gpus": [],
37-
}
38-
39-
for gpu_group in gpus:
40-
gpu_data = {
41-
"name": gpu_group.name,
42-
"memory_mib": gpu_group.memory_mib,
43-
"vendor": gpu_group.vendor.value,
44-
"availability": [av.value for av in gpu_group.availability],
45-
"spot": gpu_group.spot,
46-
"count": {"min": gpu_group.count.min, "max": gpu_group.count.max},
47-
"price": {"min": gpu_group.price.min, "max": gpu_group.price.max},
48-
}
49-
50-
if gpu_group.backend:
51-
gpu_data["backend"] = gpu_group.backend.value
52-
if gpu_group.backends:
53-
gpu_data["backends"] = [b.value for b in gpu_group.backends]
54-
if gpu_group.region:
55-
gpu_data["region"] = gpu_group.region
56-
if gpu_group.regions:
57-
gpu_data["regions"] = gpu_group.regions
58-
59-
output["gpus"].append(gpu_data)
60-
61-
import json
27+
output = OfferCommandOutput(
28+
project=project,
29+
requirements=req,
30+
group_by=group_by,
31+
gpus=gpus,
32+
)
6233

63-
print(json.dumps(output, indent=2))
34+
print(output.json())
6435

6536

6637
def print_gpu_table(gpus: List[GpuGroup], run_spec: RunSpec, group_by: List[str], project: str):

0 commit comments

Comments
 (0)