Skip to content

Commit 614f8b2

Browse files
Fix verda tests for verda>=1.22.0 InstanceType changes (#221)
1 parent e4490d4 commit 614f8b2

2 files changed

Lines changed: 36 additions & 21 deletions

File tree

pyproject.toml

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -53,10 +53,10 @@ oci = [
5353
"pydantic>=1.10.10,<2.0.0",
5454
]
5555
verda = [
56-
'verda',
56+
'verda>=1.22.0',
5757
]
5858
maybe_verda = [
59-
'verda; python_version>="3.10"',
59+
'verda>=1.22.0; python_version>="3.10"',
6060
]
6161
all = ["gpuhunt[aws,azure,maybe_verda,gcp,maybe_nebius,oci]"]
6262
dev = [

src/tests/providers/test_verda.py

Lines changed: 34 additions & 19 deletions
Original file line numberDiff line numberDiff line change
@@ -17,85 +17,91 @@
1717
def raw_instance_types() -> list[dict]:
1818
# verda.instance_types.get()
1919
one_gpu = {
20-
"best_for": [
21-
"Gargantuan ML models",
22-
"Multi-GPU training",
23-
"FP64 HPC",
24-
"NVLINK",
25-
],
20+
"best_for": [],
2621
"cpu": {"description": "30 CPU", "number_of_cores": 30},
22+
"currency": "usd",
2723
"deploy_warning": "H100: Use Nvidia driver 535 or higher for best performance",
2824
"description": "Dedicated Hardware Instance",
25+
"display_name": None,
2926
"gpu": {"description": "1x H100 SXM5 80GB", "number_of_gpus": 1},
3027
"gpu_memory": {"description": "80GB GPU RAM", "size_in_gigabytes": 80},
3128
"id": "c01dd00d-0000-480b-ae4e-d429115d055b",
3229
"instance_type": "1H100.80S.30V",
30+
"manufacturer": "NVIDIA",
3331
"memory": {"description": "120GB RAM", "size_in_gigabytes": 120},
34-
"model": "H100 80GB",
32+
"model": "H100",
3533
"name": "H100 SXM5 80GB",
36-
"p2p": "",
34+
"p2p": None,
3735
"price_per_hour": "3.95",
3836
"spot_price": "1.70",
3937
"storage": {"description": "dynamic"},
38+
"supported_os": ["ubuntu-22.04", "ubuntu-24.04"],
4039
}
4140

4241
two_gpu = {
43-
"best_for": ["Large ML models", "FP32 calculations", "Single-GPU training"],
42+
"best_for": [],
4443
"cpu": {"description": "20 CPU", "number_of_cores": 20},
44+
"currency": "usd",
4545
"deploy_warning": None,
4646
"description": "Dedicated Hardware Instance",
47+
"display_name": None,
4748
"gpu": {"description": "2x RTX A6000 48GB", "number_of_gpus": 2},
4849
"gpu_memory": {"description": "96GB GPU RAM", "size_in_gigabytes": 96},
4950
"id": "07cf5dc1-a5d2-4972-ae4e-d429115d055b",
5051
"instance_type": "2A6000.20V",
52+
"manufacturer": "NVIDIA",
5153
"memory": {"description": "120GB RAM", "size_in_gigabytes": 120},
5254
"model": "RTX A6000",
5355
"name": "RTX A6000 48GB",
54-
"p2p": "",
56+
"p2p": "50GB/s",
5557
"price_per_hour": "1.98",
5658
"spot_price": "0.70",
5759
"storage": {"description": "dynamic"},
60+
"supported_os": ["ubuntu-22.04", "ubuntu-24.04"],
5861
}
5962

6063
cpu_instance = {
61-
"best_for": ["Running services", "API server", "Data transfers"],
64+
"best_for": [],
6265
"cpu": {"description": "120 CPU", "number_of_cores": 120},
66+
"currency": "usd",
6367
"deploy_warning": None,
6468
"description": "Dedicated Hardware Instance",
69+
"display_name": "Rome/Milan",
6570
"gpu": {"description": "", "number_of_gpus": 0},
6671
"gpu_memory": {"description": "", "size_in_gigabytes": 0},
6772
"id": "ccc00007-a5d2-4972-ae4e-d429115d055b",
6873
"instance_type": "CPU.120V.480G",
74+
"manufacturer": "AMD",
6975
"memory": {"description": "480GB RAM", "size_in_gigabytes": 480},
7076
"model": "CPU Node",
7177
"name": "AMD EPYC",
72-
"p2p": "",
78+
"p2p": None,
7379
"price_per_hour": "3.00",
7480
"spot_price": "1.50",
7581
"storage": {"description": "dynamic"},
82+
"supported_os": ["ubuntu-22.04", "ubuntu-24.04"],
7683
}
7784

7885
minimal = {
79-
"best_for": [
80-
"Small ML models",
81-
"Multi-GPU training",
82-
"FP64 calculations",
83-
"NVLINK",
84-
],
86+
"best_for": [],
8587
"cpu": {"description": "6 CPU", "number_of_cores": 6},
88+
"currency": "usd",
8689
"deploy_warning": None,
8790
"description": "Dedicated Hardware Instance",
91+
"display_name": None,
8892
"gpu": {"description": "1x Tesla V100 16GB", "number_of_gpus": 1},
8993
"gpu_memory": {"description": "16GB GPU RAM", "size_in_gigabytes": 16},
9094
"id": "04cf5dc1-a5d2-4972-ae4e-d429115d055b",
9195
"instance_type": "1V100.6V",
96+
"manufacturer": "NVIDIA",
9297
"memory": {"description": "23GB RAM", "size_in_gigabytes": 23},
9398
"model": "Tesla V100",
9499
"name": "Tesla V100 16GB",
95-
"p2p": "",
100+
"p2p": None,
96101
"price_per_hour": "0.89",
97102
"spot_price": "0.25",
98103
"storage": {"description": "225GB NVME", "size_in_gigabytes": 225},
104+
"supported_os": ["ubuntu-22.04", "ubuntu-24.04"],
99105
}
100106

101107
return [one_gpu, two_gpu, cpu_instance, minimal]
@@ -162,6 +168,15 @@ def instance_types(raw_instance_type: dict) -> InstanceType:
162168
memory=raw_instance_type["memory"],
163169
gpu_memory=raw_instance_type["gpu_memory"],
164170
storage=raw_instance_type["storage"],
171+
best_for=raw_instance_type["best_for"],
172+
model=raw_instance_type["model"],
173+
name=raw_instance_type["name"],
174+
p2p=raw_instance_type["p2p"],
175+
currency=raw_instance_type["currency"],
176+
manufacturer=raw_instance_type["manufacturer"],
177+
display_name=raw_instance_type["display_name"],
178+
supported_os=raw_instance_type["supported_os"],
179+
deploy_warning=raw_instance_type.get("deploy_warning"),
165180
)
166181
return instance
167182

0 commit comments

Comments (0)