     ModelDeploymentConfigSummary,
     MultiModelDeploymentConfigLoader,
 )
-from ads.aqua.modeldeployment.constants import DEFAULT_POLL_INTERVAL, DEFAULT_WAIT_TIME, SHAPE_MAP
+from ads.aqua.modeldeployment.constants import (
+    DEFAULT_POLL_INTERVAL,
+    DEFAULT_WAIT_TIME,
+    SHAPE_MAP,
+)
 from ads.aqua.modeldeployment.entities import (
     AquaDeployment,
     AquaDeploymentDetail,
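Reviewer note: the newly imported SHAPE_MAP is used further down in this diff to normalize the OCI shape_series value, with "GPU" as the fallback. A minimal sketch of that lookup, assuming an illustrative mapping (the real constant is defined in ads.aqua.modeldeployment.constants and its contents may differ):

# Illustrative only: the real SHAPE_MAP lives in ads.aqua.modeldeployment.constants;
# the keys and values below are assumptions for demonstration.
SHAPE_MAP = {
    "NVIDIA_GPU": "GPU",
    "GENERIC": "CPU",
}

# The lookup used later in this diff falls back to "GPU" when a shape series
# is missing from the map:
print(SHAPE_MAP.get("NVIDIA_GPU", "GPU"))      # -> "GPU"
print(SHAPE_MAP.get("UNKNOWN_SERIES", "GPU"))  # fallback -> "GPU"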
@@ -1282,28 +1286,25 @@ def valid_compute_shapes(self, **kwargs) -> List["ComputeShapeSummary"]:
                 oci_shape = set_user_shapes.get(name)
 
                 compute_shape = ComputeShapeSummary(
-                    available=True,
-                    core_count= oci_shape.core_count,
-                    memory_in_gbs= oci_shape.memory_in_gbs,
-                    shape_series= SHAPE_MAP.get(oci_shape.shape_series, "GPU"),
-                    name= oci_shape.name,
-                    gpu_specs= spec
-                )
+                    available=True,
+                    core_count=oci_shape.core_count,
+                    memory_in_gbs=oci_shape.memory_in_gbs,
+                    shape_series=SHAPE_MAP.get(oci_shape.shape_series, "GPU"),
+                    name=oci_shape.name,
+                    gpu_specs=spec,
+                )
             else:
                 compute_shape = ComputeShapeSummary(
-                    available=False, name=name, shape_series="GPU", gpu_specs=spec
-                )
+                    available=False, name=name, shape_series="GPU", gpu_specs=spec
+                )
             valid_shapes.append(compute_shape)
 
         valid_shapes.sort(
             key=lambda shape: shape.gpu_specs.gpu_memory_in_gbs, reverse=True
         )
         return valid_shapes
 
-
-    def recommend_shape(
-        self, **kwargs
-    ) -> Union[Table, ShapeRecommendationReport]:
+    def recommend_shape(self, **kwargs) -> Union[Table, ShapeRecommendationReport]:
         """
         For the CLI (set generate_table = True), generates the table (in rich diff) with valid
         GPU deployment shapes for the provided model and configuration.
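The hunk above ends by ordering the collected shapes by GPU memory, largest first. A small self-contained sketch of that ordering, using simplified stand-ins for ComputeShapeSummary and its gpu_specs (the real entities live in ads.aqua.modeldeployment.entities and carry more fields), and assuming every entry has gpu_specs populated, as it is in the diff:

# Sketch of the sort in valid_compute_shapes; Shape/GpuSpecs below are
# simplified stand-ins, and the GPU memory values are illustrative.
from dataclasses import dataclass

@dataclass
class GpuSpecs:
    gpu_memory_in_gbs: int

@dataclass
class Shape:
    name: str
    gpu_specs: GpuSpecs

valid_shapes = [
    Shape("VM.GPU.A10.1", GpuSpecs(24)),
    Shape("BM.GPU.H100.8", GpuSpecs(640)),
    Shape("VM.GPU.A10.2", GpuSpecs(48)),
]

# Same key/reverse arguments as in the diff: largest GPU memory first.
valid_shapes.sort(key=lambda shape: shape.gpu_specs.gpu_memory_in_gbs, reverse=True)
print([s.name for s in valid_shapes])
# ['BM.GPU.H100.8', 'VM.GPU.A10.2', 'VM.GPU.A10.1']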
@@ -1334,9 +1335,6 @@ def recommend_shape(
         AquaValueError
             If model type is unsupported by tool (no recommendation report generated)
         """
-        # generate_table = kwargs.pop(
-        #     "generate_table", True
-        # ) # Generate rich diff table by default
         compartment_id = kwargs.get("compartment_id", COMPARTMENT_OCID)
 
         kwargs["shapes"] = self.valid_compute_shapes(compartment_id=compartment_id)
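For context on how this method would be driven, here is a hypothetical invocation sketch. Only compartment_id and generate_table appear in the diff and docstring, so the other names (the import path, class name, and the model_id kwarg) are assumptions rather than the confirmed API:

# Hypothetical usage sketch; verify names against the actual signature.
from ads.aqua.modeldeployment import AquaDeploymentApp  # import path assumed

app = AquaDeploymentApp()

# Per the docstring: returns a rich Table when generate_table=True (CLI path),
# otherwise a ShapeRecommendationReport.
report = app.recommend_shape(
    model_id="ocid1.datasciencemodel.oc1..<unique_id>",   # hypothetical kwarg name
    compartment_id="ocid1.compartment.oc1..<unique_id>",  # defaults to COMPARTMENT_OCID
    generate_table=False,
)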