diff --git a/model/aws/autoscaling/extensions/models/auto_scaling_group.ts b/model/aws/autoscaling/extensions/models/auto_scaling_group.ts index e6fdace34..9796bf58b 100644 --- a/model/aws/autoscaling/extensions/models/auto_scaling_group.ts +++ b/model/aws/autoscaling/extensions/models/auto_scaling_group.ts @@ -330,7 +330,9 @@ const GlobalArgsSchema = z.object({ TargetGroupARNs: z.array(z.string()).describe( "The Amazon Resource Names (ARN) of the Elastic Load Balancing target groups to associate with the Auto Scaling group. Instances are registered as targets with the target groups. The target groups receive incoming traffic and route requests to one or more registered targets. For more information, see [Use Elastic Load Balancing to distribute traffic across the instances in your Auto Scaling group](https://docs.aws.amazon.com/autoscaling/ec2/userguide/autoscaling-load-balancer.html) in the *Amazon EC2 Auto Scaling User Guide*.", ).optional(), - AvailabilityZoneIds: z.array(z.string()).optional(), + AvailabilityZoneIds: z.array(z.string()).describe( + "The Availability Zone IDs where the Auto Scaling group can launch instances.", + ).optional(), Cooldown: z.string().describe( "*Only needed if you use simple scaling policies.* The amount of time, in seconds, between one scaling activity ending and another one starting due to simple scaling policies. For more information, see [Scaling cooldowns for Amazon EC2 Auto Scaling](https://docs.aws.amazon.com/autoscaling/ec2/userguide/ec2-auto-scaling-scaling-cooldowns.html) in the *Amazon EC2 Auto Scaling User Guide*. Default: 300 seconds", ).optional(), @@ -562,7 +564,9 @@ const InputsSchema = z.object({ TargetGroupARNs: z.array(z.string()).describe( "The Amazon Resource Names (ARN) of the Elastic Load Balancing target groups to associate with the Auto Scaling group. Instances are registered as targets with the target groups. The target groups receive incoming traffic and route requests to one or more registered targets. 
For more information, see [Use Elastic Load Balancing to distribute traffic across the instances in your Auto Scaling group](https://docs.aws.amazon.com/autoscaling/ec2/userguide/autoscaling-load-balancer.html) in the *Amazon EC2 Auto Scaling User Guide*.", ).optional(), - AvailabilityZoneIds: z.array(z.string()).optional(), + AvailabilityZoneIds: z.array(z.string()).describe( + "The Availability Zone IDs where the Auto Scaling group can launch instances.", + ).optional(), Cooldown: z.string().describe( "*Only needed if you use simple scaling policies.* The amount of time, in seconds, between one scaling activity ending and another one starting due to simple scaling policies. For more information, see [Scaling cooldowns for Amazon EC2 Auto Scaling](https://docs.aws.amazon.com/autoscaling/ec2/userguide/ec2-auto-scaling-scaling-cooldowns.html) in the *Amazon EC2 Auto Scaling User Guide*. Default: 300 seconds", ).optional(), @@ -705,7 +709,7 @@ const InputsSchema = z.object({ /** Swamp extension model for AutoScaling AutoScalingGroup. Registered at `@swamp/aws/autoscaling/auto-scaling-group`. */ export const model = { type: "@swamp/aws/autoscaling/auto-scaling-group", - version: "2026.04.23.2", + version: "2026.05.06.1", upgrades: [ { toVersion: "2026.03.27.1", @@ -742,6 +746,11 @@ export const model = { description: "No schema changes", upgradeAttributes: (old: Record) => old, }, + { + toVersion: "2026.05.06.1", + description: "No schema changes", + upgradeAttributes: (old: Record) => old, + }, ], globalArguments: GlobalArgsSchema, inputsSchema: InputsSchema, diff --git a/model/aws/autoscaling/manifest.yaml b/model/aws/autoscaling/manifest.yaml index 757018f43..ff5126a07 100644 --- a/model/aws/autoscaling/manifest.yaml +++ b/model/aws/autoscaling/manifest.yaml @@ -1,7 +1,7 @@ # Auto-generated manifest. Re-generate with the appropriate deno task. 
manifestVersion: 1 name: "@swamp/aws/autoscaling" -version: "2026.04.23.3" +version: "2026.05.06.1" description: "AWS AUTOSCALING infrastructure models" repository: "https://github.com/systeminit/swamp-extensions" labels: @@ -9,6 +9,8 @@ labels: - autoscaling - cloud - infrastructure +releaseNotes: | + - Updated: auto_scaling_group models: - auto_scaling_group.ts - launch_configuration.ts diff --git a/model/aws/bedrockagentcore/manifest.yaml b/model/aws/bedrockagentcore/manifest.yaml index 603da89fd..55e9cc616 100644 --- a/model/aws/bedrockagentcore/manifest.yaml +++ b/model/aws/bedrockagentcore/manifest.yaml @@ -9,8 +9,6 @@ labels: - bedrockagentcore - cloud - infrastructure -releaseNotes: | - - Updated: evaluator models: - api_key_credential_provider.ts - browser_custom.ts diff --git a/model/aws/chime/manifest.yaml b/model/aws/chime/manifest.yaml index 83145c5fa..f0d56fc5d 100644 --- a/model/aws/chime/manifest.yaml +++ b/model/aws/chime/manifest.yaml @@ -9,8 +9,6 @@ labels: - chime - cloud - infrastructure -releaseNotes: | - - Added: app_instance_bot models: - app_instance.ts - app_instance_bot.ts diff --git a/model/aws/cloudfront/extensions/models/distribution.ts b/model/aws/cloudfront/extensions/models/distribution.ts index eaa227df2..98dc5c18e 100644 --- a/model/aws/cloudfront/extensions/models/distribution.ts +++ b/model/aws/cloudfront/extensions/models/distribution.ts @@ -454,8 +454,9 @@ const TrustStoreConfigSchema = z.object({ }); const ViewerMtlsConfigSchema = z.object({ - Mode: z.enum(["required", "optional"]).describe("The viewer mTLS mode.") - .optional(), + Mode: z.enum(["required", "optional", "passthrough"]).describe( + "The viewer mTLS mode.", + ).optional(), TrustStoreConfig: TrustStoreConfigSchema.describe( "The trust store configuration associated with the viewer mTLS configuration.", ).optional(), @@ -708,7 +709,7 @@ const InputsSchema = z.object({ /** Swamp extension model for CloudFront Distribution. 
Registered at `@swamp/aws/cloudfront/distribution`. */ export const model = { type: "@swamp/aws/cloudfront/distribution", - version: "2026.05.01.1", + version: "2026.05.06.1", upgrades: [ { toVersion: "2026.04.01.1", @@ -740,6 +741,11 @@ export const model = { description: "No schema changes", upgradeAttributes: (old: Record) => old, }, + { + toVersion: "2026.05.06.1", + description: "No schema changes", + upgradeAttributes: (old: Record) => old, + }, ], globalArguments: GlobalArgsSchema, inputsSchema: InputsSchema, diff --git a/model/aws/cloudfront/extensions/models/trust_store.ts b/model/aws/cloudfront/extensions/models/trust_store.ts index 04f92d130..a2a8f365d 100644 --- a/model/aws/cloudfront/extensions/models/trust_store.ts +++ b/model/aws/cloudfront/extensions/models/trust_store.ts @@ -50,6 +50,7 @@ const GlobalArgsSchema = z.object({ CaCertificatesBundleS3Location: CaCertificatesBundleS3LocationSchema .describe("The CA certificates bundle location in Amazon S3."), }).describe("A CA certificates bundle source.").optional(), + UseClientCertificateOCSPEndpoint: z.boolean().optional(), Tags: z.array(TagSchema).describe( "A complex type that contains zero or more Tag elements.", ).optional(), @@ -62,6 +63,7 @@ const StateSchema = z.object({ CaCertificatesBundleSource: z.object({ CaCertificatesBundleS3Location: CaCertificatesBundleS3LocationSchema, }).optional(), + UseClientCertificateOCSPEndpoint: z.boolean().optional(), Status: z.string().optional(), ETag: z.string().optional(), LastModifiedTime: z.string().optional(), @@ -78,6 +80,7 @@ const InputsSchema = z.object({ CaCertificatesBundleS3Location: CaCertificatesBundleS3LocationSchema .describe("The CA certificates bundle location in Amazon S3.").optional(), }).describe("A CA certificates bundle source.").optional(), + UseClientCertificateOCSPEndpoint: z.boolean().optional(), Tags: z.array(TagSchema).describe( "A complex type that contains zero or more Tag elements.", ).optional(), @@ -86,7 +89,7 @@ const 
InputsSchema = z.object({ /** Swamp extension model for CloudFront TrustStore. Registered at `@swamp/aws/cloudfront/trust-store`. */ export const model = { type: "@swamp/aws/cloudfront/trust-store", - version: "2026.04.23.2", + version: "2026.05.06.1", upgrades: [ { toVersion: "2026.04.01.1", @@ -113,6 +116,11 @@ export const model = { description: "No schema changes", upgradeAttributes: (old: Record) => old, }, + { + toVersion: "2026.05.06.1", + description: "Added: UseClientCertificateOCSPEndpoint", + upgradeAttributes: (old: Record) => old, + }, ], globalArguments: GlobalArgsSchema, inputsSchema: InputsSchema, diff --git a/model/aws/cloudfront/manifest.yaml b/model/aws/cloudfront/manifest.yaml index 78ebfb294..17c519dbb 100644 --- a/model/aws/cloudfront/manifest.yaml +++ b/model/aws/cloudfront/manifest.yaml @@ -1,7 +1,7 @@ # Auto-generated manifest. Re-generate with the appropriate deno task. manifestVersion: 1 name: "@swamp/aws/cloudfront" -version: "2026.05.01.1" +version: "2026.05.06.1" description: "AWS CLOUDFRONT infrastructure models" repository: "https://github.com/systeminit/swamp-extensions" labels: @@ -9,6 +9,8 @@ labels: - cloudfront - cloud - infrastructure +releaseNotes: | + - Updated: distribution, trust_store models: - anycast_ip_list.ts - cache_policy.ts diff --git a/model/aws/datazone/manifest.yaml b/model/aws/datazone/manifest.yaml index 04571c815..c8d0d82f1 100644 --- a/model/aws/datazone/manifest.yaml +++ b/model/aws/datazone/manifest.yaml @@ -9,8 +9,6 @@ labels: - datazone - cloud - infrastructure -releaseNotes: | - - Updated: domain, group_profile, project, user_profile models: - connection.ts - data_source.ts diff --git a/model/aws/deadline/manifest.yaml b/model/aws/deadline/manifest.yaml index 933d4026d..17b86629d 100644 --- a/model/aws/deadline/manifest.yaml +++ b/model/aws/deadline/manifest.yaml @@ -9,8 +9,6 @@ labels: - deadline - cloud - infrastructure -releaseNotes: | - - Updated: fleet models: - farm.ts - fleet.ts diff --git 
a/model/aws/eks/extensions/models/addon.ts b/model/aws/eks/extensions/models/addon.ts index edc99ed64..fccff5c71 100644 --- a/model/aws/eks/extensions/models/addon.ts +++ b/model/aws/eks/extensions/models/addon.ts @@ -26,7 +26,7 @@ const PodIdentityAssociationSchema = z.object({ "The Kubernetes service account that the pod identity association is created for.", ), RoleArn: z.string().regex( - new RegExp("^arn:aws(-cn|-us-gov|-iso(-[a-z])?)?:iam::\\d{12}:(role)\\/*"), + new RegExp("^arn:aws[a-zA-Z-]*:iam::\\d{12}:(role)\\/*"), ).describe( "The IAM role ARN that the pod identity association is created for.", ), @@ -124,7 +124,7 @@ const InputsSchema = z.object({ /** Swamp extension model for EKS Addon. Registered at `@swamp/aws/eks/addon`. */ export const model = { type: "@swamp/aws/eks/addon", - version: "2026.04.23.2", + version: "2026.05.06.1", upgrades: [ { toVersion: "2026.04.01.1", @@ -151,6 +151,11 @@ export const model = { description: "No schema changes", upgradeAttributes: (old: Record<string, unknown>) => old, }, + { + toVersion: "2026.05.06.1", + description: "No schema changes", + upgradeAttributes: (old: Record<string, unknown>) => old, + }, ], globalArguments: GlobalArgsSchema, inputsSchema: InputsSchema, diff --git a/model/aws/eks/manifest.yaml b/model/aws/eks/manifest.yaml index 3404cb47b..f098fc281 100644 --- a/model/aws/eks/manifest.yaml +++ b/model/aws/eks/manifest.yaml @@ -1,7 +1,7 @@ # Auto-generated manifest. Re-generate with the appropriate deno task. 
manifestVersion: 1 name: "@swamp/aws/eks" -version: "2026.04.23.3" +version: "2026.05.06.1" description: "AWS EKS infrastructure models" repository: "https://github.com/systeminit/swamp-extensions" labels: @@ -9,6 +9,8 @@ labels: - eks - cloud - infrastructure +releaseNotes: | + - Updated: addon models: - access_entry.ts - addon.ts diff --git a/model/aws/elasticache/extensions/models/cache_cluster.ts b/model/aws/elasticache/extensions/models/cache_cluster.ts new file mode 100644 index 000000000..db7ce68ce --- /dev/null +++ b/model/aws/elasticache/extensions/models/cache_cluster.ts @@ -0,0 +1,433 @@ +// Auto-generated extension model for @swamp/aws/elasticache/cache-cluster +// Do not edit manually. Re-generate with: deno task generate:aws + +// deno-lint-ignore-file no-explicit-any + +/** + * Swamp extension model for ElastiCache CacheCluster (AWS::ElastiCache::CacheCluster). + * + * Wraps the CloudFormation resource type as a swamp model so create, + * get, update, delete, and sync can be driven through `swamp model`. 
+ * + * @module + */ + +import { z } from "npm:zod@4.3.6"; +import { + createResource, + deleteResource, + isResourceNotFoundError, + readResource, + updateResource, +} from "./_lib/aws.ts"; + +const TagSchema = z.object({ + Value: z.string(), + Key: z.string(), +}); + +const CloudWatchLogsDestinationDetailsSchema = z.object({ + LogGroup: z.string().describe("The name of the CloudWatch Logs log group."), +}); + +const KinesisFirehoseDestinationDetailsSchema = z.object({ + DeliveryStream: z.string().describe( + "The name of the Kinesis Data Firehose delivery stream", + ), +}); + +const DestinationDetailsSchema = z.object({ + CloudWatchLogsDetails: CloudWatchLogsDestinationDetailsSchema.describe( + "The configuration details of the CloudWatch Logs destination", + ).optional(), + KinesisFirehoseDetails: KinesisFirehoseDestinationDetailsSchema.describe( + "The configuration details of the Kinesis Data Firehose destination.", + ).optional(), +}); + +const LogDeliveryConfigurationRequestSchema = z.object({ + LogType: z.string().describe( + "Valid value is either slow-log, which refers to slow-log or engine-log", + ), + LogFormat: z.string().describe("Valid values are either json or text"), + DestinationType: z.string().describe( + "Specify either CloudWatch Logs or Kinesis Data Firehose as the destination type.", + ), + DestinationDetails: DestinationDetailsSchema.describe( + "Configuration details of either a CloudWatch Logs destination or Kinesis Data Firehose destination.", + ), +}); + +const GlobalArgsSchema = z.object({ + AutoMinorVersionUpgrade: z.boolean().describe( + "If you are running Redis engine version 6.0 or later, set this parameter to yes if you want to opt-in to the next minor version upgrade campaign.", + ).optional(), + AZMode: z.string().describe( + "Specifies whether the nodes in this Memcached cluster are created in a single Availability Zone or created across multiple Availability Zones in the cluster's region.", + ).optional(), + CacheNodeType: 
z.string().describe( + "The compute and memory capacity of the nodes in the node group (shard).", + ), + CacheParameterGroupName: z.string().describe( + "The name of the parameter group to associate with this cluster.", + ).optional(), + CacheSecurityGroupNames: z.array(z.string()).describe( + "A list of security group names to associate with this cluster.", + ).optional(), + CacheSubnetGroupName: z.string().describe( + "The name of the subnet group to be used for the cluster.", + ).optional(), + NotificationTopicArn: z.string().describe( + "The Amazon Resource Name (ARN) of the Amazon Simple Notification Service (SNS) topic to which notifications are sent.", + ).optional(), + SnapshotArns: z.array(z.string()).describe( + "A single-element string list containing an Amazon Resource Name (ARN) that uniquely identifies a Redis RDB snapshot file stored in Amazon S3.", + ).optional(), + NumCacheNodes: z.number().int().describe( + "The number of cache nodes that the cache cluster should have.", + ), + SnapshotName: z.string().describe( + "The name of a Redis snapshot from which to restore data into the new node group (shard).", + ).optional(), + PreferredAvailabilityZones: z.array(z.string()).describe( + "A list of the Availability Zones in which cache nodes are created. 
The order of the zones in the list is not important.", + ).optional(), + VpcSecurityGroupIds: z.array(z.string()).describe( + "One or more VPC security groups associated with the cluster.", + ).optional(), + ClusterName: z.string().describe("A name for the cache cluster.").optional(), + Engine: z.string().describe( + "The name of the cache engine to be used for this cluster.", + ), + Tags: z.array(TagSchema).describe( + "A list of tags to be added to this resource.", + ).optional(), + EngineVersion: z.string().describe( + "The version number of the cache engine to be used for this cluster", + ).optional(), + PreferredMaintenanceWindow: z.string().describe( + "Specifies the weekly time range during which maintenance on the cluster is performed.", + ).optional(), + PreferredAvailabilityZone: z.string().describe( + "The EC2 Availability Zone in which the cluster is created.", + ).optional(), + SnapshotWindow: z.string().describe( + "The daily time range (in UTC) during which ElastiCache begins taking a daily snapshot of your node group (shard).", + ).optional(), + NetworkType: z.string().describe( + "The network type parameter for cachecluster.", + ).optional(), + IpDiscovery: z.string().describe( + "The Ip Discovery parameter for cachecluster.", + ).optional(), + SnapshotRetentionLimit: z.number().int().describe( + "The number of days for which ElastiCache retains automatic snapshots before deleting them.", + ).optional(), + LogDeliveryConfigurations: z.array(LogDeliveryConfigurationRequestSchema) + .describe("Specifies the destination, format and type of the logs") + .optional(), + TransitEncryptionEnabled: z.boolean().describe( + "A flag that enables in-transit encryption when set to true. 
You cannot modify the value of TransitEncryptionEnabled after the cluster is created", + ).optional(), +}); + +const StateSchema = z.object({ + AutoMinorVersionUpgrade: z.boolean().optional(), + AZMode: z.string().optional(), + CacheNodeType: z.string().optional(), + CacheParameterGroupName: z.string().optional(), + CacheSecurityGroupNames: z.array(z.string()).optional(), + CacheSubnetGroupName: z.string().optional(), + NotificationTopicArn: z.string().optional(), + SnapshotArns: z.array(z.string()).optional(), + Port: z.number().optional(), + NumCacheNodes: z.number().optional(), + SnapshotName: z.string().optional(), + PreferredAvailabilityZones: z.array(z.string()).optional(), + VpcSecurityGroupIds: z.array(z.string()).optional(), + ClusterName: z.string(), + Engine: z.string().optional(), + Tags: z.array(TagSchema).optional(), + EngineVersion: z.string().optional(), + PreferredMaintenanceWindow: z.string().optional(), + PreferredAvailabilityZone: z.string().optional(), + SnapshotWindow: z.string().optional(), + NetworkType: z.string().optional(), + IpDiscovery: z.string().optional(), + SnapshotRetentionLimit: z.number().optional(), + LogDeliveryConfigurations: z.array(LogDeliveryConfigurationRequestSchema) + .optional(), + TransitEncryptionEnabled: z.boolean().optional(), + ConfigurationEndpoint: z.object({ + Address: z.string(), + Port: z.string(), + }).optional(), + RedisEndpoint: z.object({ + Address: z.string(), + Port: z.string(), + }).optional(), +}).passthrough(); + +type StateData = z.infer<typeof StateSchema>; + +const InputsSchema = z.object({ + AutoMinorVersionUpgrade: z.boolean().describe( + "If you are running Redis engine version 6.0 or later, set this parameter to yes if you want to opt-in to the next minor version upgrade campaign.", + ).optional(), + AZMode: z.string().describe( + "Specifies whether the nodes in this Memcached cluster are created in a single Availability Zone or created across multiple Availability Zones in the cluster's region.", + ).optional(), 
+ CacheNodeType: z.string().describe( + "The compute and memory capacity of the nodes in the node group (shard).", + ).optional(), + CacheParameterGroupName: z.string().describe( + "The name of the parameter group to associate with this cluster.", + ).optional(), + CacheSecurityGroupNames: z.array(z.string()).describe( + "A list of security group names to associate with this cluster.", + ).optional(), + CacheSubnetGroupName: z.string().describe( + "The name of the subnet group to be used for the cluster.", + ).optional(), + NotificationTopicArn: z.string().describe( + "The Amazon Resource Name (ARN) of the Amazon Simple Notification Service (SNS) topic to which notifications are sent.", + ).optional(), + SnapshotArns: z.array(z.string()).describe( + "A single-element string list containing an Amazon Resource Name (ARN) that uniquely identifies a Redis RDB snapshot file stored in Amazon S3.", + ).optional(), + NumCacheNodes: z.number().int().describe( + "The number of cache nodes that the cache cluster should have.", + ).optional(), + SnapshotName: z.string().describe( + "The name of a Redis snapshot from which to restore data into the new node group (shard).", + ).optional(), + PreferredAvailabilityZones: z.array(z.string()).describe( + "A list of the Availability Zones in which cache nodes are created. 
The order of the zones in the list is not important.", + ).optional(), + VpcSecurityGroupIds: z.array(z.string()).describe( + "One or more VPC security groups associated with the cluster.", + ).optional(), + ClusterName: z.string().describe("A name for the cache cluster.").optional(), + Engine: z.string().describe( + "The name of the cache engine to be used for this cluster.", + ).optional(), + Tags: z.array(TagSchema).describe( + "A list of tags to be added to this resource.", + ).optional(), + EngineVersion: z.string().describe( + "The version number of the cache engine to be used for this cluster", + ).optional(), + PreferredMaintenanceWindow: z.string().describe( + "Specifies the weekly time range during which maintenance on the cluster is performed.", + ).optional(), + PreferredAvailabilityZone: z.string().describe( + "The EC2 Availability Zone in which the cluster is created.", + ).optional(), + SnapshotWindow: z.string().describe( + "The daily time range (in UTC) during which ElastiCache begins taking a daily snapshot of your node group (shard).", + ).optional(), + NetworkType: z.string().describe( + "The network type parameter for cachecluster.", + ).optional(), + IpDiscovery: z.string().describe( + "The Ip Discovery parameter for cachecluster.", + ).optional(), + SnapshotRetentionLimit: z.number().int().describe( + "The number of days for which ElastiCache retains automatic snapshots before deleting them.", + ).optional(), + LogDeliveryConfigurations: z.array(LogDeliveryConfigurationRequestSchema) + .describe("Specifies the destination, format and type of the logs") + .optional(), + TransitEncryptionEnabled: z.boolean().describe( + "A flag that enables in-transit encryption when set to true. You cannot modify the value of TransitEncryptionEnabled after the cluster is created", + ).optional(), +}); + +/** Swamp extension model for ElastiCache CacheCluster. Registered at `@swamp/aws/elasticache/cache-cluster`. 
*/ +export const model = { + type: "@swamp/aws/elasticache/cache-cluster", + version: "2026.05.06.1", + globalArguments: GlobalArgsSchema, + inputsSchema: InputsSchema, + resources: { + state: { + description: "ElastiCache CacheCluster resource state", + schema: StateSchema, + lifetime: "infinite", + garbageCollection: 10, + }, + }, + methods: { + create: { + description: "Create a ElastiCache CacheCluster", + arguments: z.object({}), + execute: async (_args: Record, context: any) => { + const g = context.globalArgs; + const desiredState: Record = {}; + for (const [key, value] of Object.entries(g)) { + if (value !== undefined) desiredState[key] = value; + } + const result = await createResource( + "AWS::ElastiCache::CacheCluster", + desiredState, + ) as StateData; + const instanceName = + ((result.ClusterName ?? g.ClusterName)?.toString() ?? "current") + .replace(/[\/\\]/g, "_").replace(/\.\./g, "_").replace(/\0/g, ""); + const handle = await context.writeResource( + "state", + instanceName, + result, + ); + return { dataHandles: [handle] }; + }, + }, + get: { + description: "Get a ElastiCache CacheCluster", + arguments: z.object({ + identifier: z.string().describe( + "The primary identifier of the ElastiCache CacheCluster", + ), + }), + execute: async (args: { identifier: string }, context: any) => { + const result = await readResource( + "AWS::ElastiCache::CacheCluster", + args.identifier, + ) as StateData; + const instanceName = + ((result.ClusterName ?? context.globalArgs.ClusterName)?.toString() ?? + args.identifier).replace(/[\/\\]/g, "_").replace(/\.\./g, "_") + .replace(/\0/g, ""); + const handle = await context.writeResource( + "state", + instanceName, + result, + ); + return { dataHandles: [handle] }; + }, + }, + update: { + description: "Update a ElastiCache CacheCluster", + arguments: z.object({}), + execute: async (_args: Record, context: any) => { + const g = context.globalArgs; + const instanceName = (g.ClusterName?.toString() ?? 
"current").replace( + /[\/\\]/g, + "_", + ).replace(/\.\./g, "_").replace(/\0/g, ""); + const content = await context.dataRepository.getContent( + context.modelType, + context.modelId, + instanceName, + ); + if (!content) { + throw new Error("No existing state found - run create or get first"); + } + const existing = JSON.parse(new TextDecoder().decode(content)); + const identifier = existing.ClusterName?.toString(); + if (!identifier) { + throw new Error("No identifier found in existing state"); + } + const currentState = await readResource( + "AWS::ElastiCache::CacheCluster", + identifier, + ) as StateData; + const desiredState: Record = { ...currentState }; + for (const [key, value] of Object.entries(g)) { + if (value !== undefined) desiredState[key] = value; + } + const result = await updateResource( + "AWS::ElastiCache::CacheCluster", + identifier, + currentState, + desiredState, + [ + "Port", + "SnapshotArns", + "SnapshotName", + "CacheSubnetGroupName", + "ClusterName", + "Engine", + "NetworkType", + ], + ); + const handle = await context.writeResource( + "state", + instanceName, + result, + ); + return { dataHandles: [handle] }; + }, + }, + delete: { + description: "Delete a ElastiCache CacheCluster", + arguments: z.object({ + identifier: z.string().describe( + "The primary identifier of the ElastiCache CacheCluster", + ), + }), + execute: async (args: { identifier: string }, context: any) => { + const { existed } = await deleteResource( + "AWS::ElastiCache::CacheCluster", + args.identifier, + ); + const instanceName = + (context.globalArgs.ClusterName?.toString() ?? args.identifier) + .replace(/[\/\\]/g, "_").replace(/\.\./g, "_").replace(/\0/g, ""); + const handle = await context.writeResource("state", instanceName, { + identifier: args.identifier, + existed, + status: existed ? 
"deleted" : "not_found", + deletedAt: new Date().toISOString(), + }); + return { dataHandles: [handle] }; + }, + }, + sync: { + description: "Sync ElastiCache CacheCluster state from AWS", + arguments: z.object({}), + execute: async (_args: Record, context: any) => { + const g = context.globalArgs; + const instanceName = (g.ClusterName?.toString() ?? "current").replace( + /[\/\\]/g, + "_", + ).replace(/\.\./g, "_").replace(/\0/g, ""); + const content = await context.dataRepository.getContent( + context.modelType, + context.modelId, + instanceName, + ); + if (!content) { + throw new Error("No existing state found - run create or get first"); + } + const existing = JSON.parse(new TextDecoder().decode(content)); + const identifier = existing.ClusterName?.toString(); + if (!identifier) { + throw new Error("No identifier found in existing state"); + } + try { + const result = await readResource( + "AWS::ElastiCache::CacheCluster", + identifier, + ) as StateData; + const handle = await context.writeResource( + "state", + instanceName, + result, + ); + return { dataHandles: [handle] }; + } catch (error: unknown) { + if (isResourceNotFoundError(error)) { + const handle = await context.writeResource("state", instanceName, { + identifier, + status: "not_found", + syncedAt: new Date().toISOString(), + }); + return { dataHandles: [handle] }; + } + throw error; + } + }, + }, + }, +}; diff --git a/model/aws/elasticache/manifest.yaml b/model/aws/elasticache/manifest.yaml index 536cf805d..2f16bbe88 100644 --- a/model/aws/elasticache/manifest.yaml +++ b/model/aws/elasticache/manifest.yaml @@ -1,7 +1,7 @@ # Auto-generated manifest. Re-generate with the appropriate deno task. 
manifestVersion: 1 name: "@swamp/aws/elasticache" -version: "2026.04.23.3" +version: "2026.05.06.1" description: "AWS ELASTICACHE infrastructure models" repository: "https://github.com/systeminit/swamp-extensions" labels: @@ -9,7 +9,10 @@ labels: - elasticache - cloud - infrastructure +releaseNotes: | + - Added: cache_cluster models: + - cache_cluster.ts - global_replication_group.ts - parameter_group.ts - replication_group.ts diff --git a/model/aws/interconnect/manifest.yaml b/model/aws/interconnect/manifest.yaml index 63510dcb5..70dc248a7 100644 --- a/model/aws/interconnect/manifest.yaml +++ b/model/aws/interconnect/manifest.yaml @@ -9,8 +9,6 @@ labels: - interconnect - cloud - infrastructure -releaseNotes: | - - Updated: connection models: - connection.ts additionalFiles: diff --git a/model/aws/rds/extensions/models/custom_dbengine_version.ts b/model/aws/rds/extensions/models/custom_dbengine_version.ts index 1a21f6903..6df680f28 100644 --- a/model/aws/rds/extensions/models/custom_dbengine_version.ts +++ b/model/aws/rds/extensions/models/custom_dbengine_version.ts @@ -66,6 +66,7 @@ const GlobalArgsSchema = z.object({ Status: z.enum(["available", "inactive", "inactive-except-restore"]).describe( "A value that indicates the status of a custom engine version (CEV).", ).optional(), + DatabaseInstallationFiles: z.array(z.string()).optional(), Tags: z.array(TagSchema).describe( "A list of tags. 
For more information, see [Tagging Amazon RDS Resources](https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/USER_Tagging.html) in the *Amazon RDS User Guide.*", ).optional(), @@ -84,6 +85,7 @@ const StateSchema = z.object({ UseAwsProvidedLatestImage: z.boolean().optional(), ImageId: z.string().optional(), Status: z.string().optional(), + DatabaseInstallationFiles: z.array(z.string()).optional(), Tags: z.array(TagSchema).optional(), }).passthrough(); @@ -123,6 +125,7 @@ const InputsSchema = z.object({ Status: z.enum(["available", "inactive", "inactive-except-restore"]).describe( "A value that indicates the status of a custom engine version (CEV).", ).optional(), + DatabaseInstallationFiles: z.array(z.string()).optional(), Tags: z.array(TagSchema).describe( "A list of tags. For more information, see [Tagging Amazon RDS Resources](https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/USER_Tagging.html) in the *Amazon RDS User Guide.*", ).optional(), @@ -131,7 +134,7 @@ const InputsSchema = z.object({ /** Swamp extension model for RDS CustomDBEngineVersion. Registered at `@swamp/aws/rds/custom-dbengine-version`. 
*/ export const model = { type: "@swamp/aws/rds/custom-dbengine-version", - version: "2026.04.23.2", + version: "2026.05.06.1", upgrades: [ { toVersion: "2026.04.01.1", @@ -158,6 +161,11 @@ export const model = { description: "No schema changes", upgradeAttributes: (old: Record<string, unknown>) => old, }, + { + toVersion: "2026.05.06.1", + description: "Added: DatabaseInstallationFiles", + upgradeAttributes: (old: Record<string, unknown>) => old, + }, ], globalArguments: GlobalArgsSchema, inputsSchema: InputsSchema, @@ -268,6 +276,7 @@ export const model = { "EngineVersion", "DatabaseInstallationFilesS3BucketName", "DatabaseInstallationFilesS3Prefix", + "DatabaseInstallationFiles", "ImageId", "KMSKeyId", "Manifest", diff --git a/model/aws/rds/manifest.yaml b/model/aws/rds/manifest.yaml index 98725a969..475ca9151 100644 --- a/model/aws/rds/manifest.yaml +++ b/model/aws/rds/manifest.yaml @@ -1,7 +1,7 @@ # Auto-generated manifest. Re-generate with the appropriate deno task. manifestVersion: 1 name: "@swamp/aws/rds" -version: "2026.04.23.3" +version: "2026.05.06.1" description: "AWS RDS infrastructure models" repository: "https://github.com/systeminit/swamp-extensions" labels: @@ -9,6 +9,8 @@ labels: - rds - cloud - infrastructure +releaseNotes: | + - Updated: custom_dbengine_version models: - custom_dbengine_version.ts - dbcluster.ts diff --git a/model/aws/vpclattice/manifest.yaml b/model/aws/vpclattice/manifest.yaml index 66dee4498..b0788f642 100644 --- a/model/aws/vpclattice/manifest.yaml +++ b/model/aws/vpclattice/manifest.yaml @@ -9,8 +9,6 @@ labels: - vpclattice - cloud - infrastructure -releaseNotes: | - - Updated: resource_gateway models: - access_log_subscription.ts - auth_policy.ts diff --git a/model/digitalocean/manifest.yaml b/model/digitalocean/manifest.yaml index 55c7800af..eaed906e7 100644 --- a/model/digitalocean/manifest.yaml +++ b/model/digitalocean/manifest.yaml @@ -8,8 +8,6 @@ labels: - digitalocean - cloud - infrastructure -releaseNotes: | - - Updated: app_platform models: - 
app_platform.ts - byoip_prefix.ts diff --git a/model/gcp/accesscontextmanager/extensions/models/permissions.ts b/model/gcp/accesscontextmanager/extensions/models/permissions.ts index 433b5424f..a2d8a8d11 100644 --- a/model/gcp/accesscontextmanager/extensions/models/permissions.ts +++ b/model/gcp/accesscontextmanager/extensions/models/permissions.ts @@ -6,7 +6,7 @@ /** * Swamp extension model for Google Cloud Access Context Manager Permissions. * - * Lists all supported permissions in VPCSC Granular Controls. + * Lists all supported permissions in VPC Service Controls ingress and egress rules for Granular Controls. * * Wraps the GCP resource as a swamp model so create, get, update, * delete, and sync can be driven through `swamp model`. @@ -55,7 +55,7 @@ const InputsSchema = z.object({ /** Swamp extension model for Google Cloud Access Context Manager Permissions. Registered at `@swamp/gcp/accesscontextmanager/permissions`. */ export const model = { type: "@swamp/gcp/accesscontextmanager/permissions", - version: "2026.05.04.1", + version: "2026.05.06.1", upgrades: [ { toVersion: "2026.04.01.1", @@ -97,13 +97,18 @@ export const model = { description: "No schema changes", upgradeAttributes: (old: Record) => old, }, + { + toVersion: "2026.05.06.1", + description: "No schema changes", + upgradeAttributes: (old: Record) => old, + }, ], globalArguments: GlobalArgsSchema, inputsSchema: InputsSchema, resources: { state: { description: - "Lists all supported permissions in VPCSC Granular Controls.", + "Lists all supported permissions in VPC Service Controls ingress and egress rules for Granular Controls.", schema: StateSchema, lifetime: "infinite", garbageCollection: 10, diff --git a/model/gcp/accesscontextmanager/manifest.yaml b/model/gcp/accesscontextmanager/manifest.yaml index b09975cf2..4f7410483 100644 --- a/model/gcp/accesscontextmanager/manifest.yaml +++ b/model/gcp/accesscontextmanager/manifest.yaml @@ -1,7 +1,7 @@ # Auto-generated manifest. Re-generate with the appropriate deno task.
manifestVersion: 1 name: "@swamp/gcp/accesscontextmanager" -version: "2026.05.04.1" +version: "2026.05.06.1" description: "Google Cloud accesscontextmanager infrastructure models" repository: "https://github.com/systeminit/swamp-extensions" labels: @@ -10,6 +10,8 @@ labels: - accesscontextmanager - cloud - infrastructure +releaseNotes: | + - Updated: permissions models: - accesspolicies.ts - accesspolicies_accesslevels.ts diff --git a/model/gcp/bigtableadmin/extensions/models/instances_clusters.ts b/model/gcp/bigtableadmin/extensions/models/instances_clusters.ts index 08c39d0a6..6d06d6e5e 100644 --- a/model/gcp/bigtableadmin/extensions/models/instances_clusters.ts +++ b/model/gcp/bigtableadmin/extensions/models/instances_clusters.ts @@ -233,7 +233,7 @@ const InputsSchema = z.object({ /** Swamp extension model for Google Cloud Bigtable Admin Instances.Clusters. Registered at `@swamp/gcp/bigtableadmin/instances-clusters`. */ export const model = { type: "@swamp/gcp/bigtableadmin/instances-clusters", - version: "2026.05.04.1", + version: "2026.05.06.1", upgrades: [ { toVersion: "2026.04.01.1", @@ -280,6 +280,11 @@ export const model = { description: "No schema changes", upgradeAttributes: (old: Record) => old, }, + { + toVersion: "2026.05.06.1", + description: "No schema changes", + upgradeAttributes: (old: Record) => old, + }, ], globalArguments: GlobalArgsSchema, inputsSchema: InputsSchema, @@ -618,48 +623,5 @@ export const model = { return { result }; }, }, - update_memory_layer: { - description: "update memory layer", - arguments: z.object({ - etag: z.any().optional(), - memoryConfig: z.any().optional(), - name: z.any().optional(), - state: z.any().optional(), - }), - execute: async (args: Record, context: any) => { - const g = context.globalArgs; - const projectId = await getProjectId(); - const params: Record = { project: projectId }; - if (g["parent"] !== undefined && g["name"] !== undefined) { - params["name"] = buildResourceName( - String(g["parent"]), - 
String(g["name"]), - ); - } - const body: Record = {}; - if (args["etag"] !== undefined) body["etag"] = args["etag"]; - if (args["memoryConfig"] !== undefined) { - body["memoryConfig"] = args["memoryConfig"]; - } - if (args["name"] !== undefined) body["name"] = args["name"]; - if (args["state"] !== undefined) body["state"] = args["state"]; - const result = await createResource( - BASE_URL, - { - "id": "bigtableadmin.projects.instances.clusters.updateMemoryLayer", - "path": "v2/{+name}", - "httpMethod": "PATCH", - "parameterOrder": ["name"], - "parameters": { - "name": { "location": "path", "required": true }, - "updateMask": { "location": "query" }, - }, - }, - params, - body, - ); - return { result }; - }, - }, }, }; diff --git a/model/gcp/bigtableadmin/manifest.yaml b/model/gcp/bigtableadmin/manifest.yaml index 209a6f3c9..f972a46d8 100644 --- a/model/gcp/bigtableadmin/manifest.yaml +++ b/model/gcp/bigtableadmin/manifest.yaml @@ -1,7 +1,7 @@ # Auto-generated manifest. Re-generate with the appropriate deno task. manifestVersion: 1 name: "@swamp/gcp/bigtableadmin" -version: "2026.05.04.1" +version: "2026.05.06.1" description: "Google Cloud bigtableadmin infrastructure models" repository: "https://github.com/systeminit/swamp-extensions" labels: @@ -10,6 +10,8 @@ labels: - bigtableadmin - cloud - infrastructure +releaseNotes: | + - Updated: instances_clusters models: - instances.ts - instances_appprofiles.ts diff --git a/model/gcp/datalineage/extensions/models/processes_runs.ts b/model/gcp/datalineage/extensions/models/processes_runs.ts index 56367f254..850618144 100644 --- a/model/gcp/datalineage/extensions/models/processes_runs.ts +++ b/model/gcp/datalineage/extensions/models/processes_runs.ts @@ -108,7 +108,7 @@ const GlobalArgsSchema = z.object({ "Optional. The attributes of the run. Should only be used for the purpose of non-semantic management (classifying, describing or labeling the run). 
Up to 100 attributes are allowed.", ).optional(), displayName: z.string().describe( - "Optional. A human-readable name you can set to display in a user interface. Must be not longer than 1024 characters and only contain UTF-8 letters or numbers, spaces or characters like `_-:&.`", + "Optional. A human-readable name you can set to display in a user interface. Must be not longer than 200 characters and only contain UTF-8 letters or numbers, spaces or characters like `_-:&.`", ).optional(), endTime: z.string().describe("Optional. The timestamp of the end of the run.") .optional(), @@ -144,7 +144,7 @@ const InputsSchema = z.object({ "Optional. The attributes of the run. Should only be used for the purpose of non-semantic management (classifying, describing or labeling the run). Up to 100 attributes are allowed.", ).optional(), displayName: z.string().describe( - "Optional. A human-readable name you can set to display in a user interface. Must be not longer than 1024 characters and only contain UTF-8 letters or numbers, spaces or characters like `_-:&.`", + "Optional. A human-readable name you can set to display in a user interface. Must be not longer than 200 characters and only contain UTF-8 letters or numbers, spaces or characters like `_-:&.`", ).optional(), endTime: z.string().describe("Optional. The timestamp of the end of the run.") .optional(), @@ -167,7 +167,7 @@ const InputsSchema = z.object({ /** Swamp extension model for Google Cloud Data Lineage Processes.Runs. Registered at `@swamp/gcp/datalineage/processes-runs`. 
*/ export const model = { type: "@swamp/gcp/datalineage/processes-runs", - version: "2026.04.23.1", + version: "2026.05.06.1", upgrades: [ { toVersion: "2026.04.01.1", @@ -199,6 +199,11 @@ export const model = { description: "No schema changes", upgradeAttributes: (old: Record) => old, }, + { + toVersion: "2026.05.06.1", + description: "No schema changes", + upgradeAttributes: (old: Record) => old, + }, ], globalArguments: GlobalArgsSchema, inputsSchema: InputsSchema, diff --git a/model/gcp/datalineage/manifest.yaml b/model/gcp/datalineage/manifest.yaml index a05c4cb44..a12d4cb03 100644 --- a/model/gcp/datalineage/manifest.yaml +++ b/model/gcp/datalineage/manifest.yaml @@ -1,7 +1,7 @@ # Auto-generated manifest. Re-generate with the appropriate deno task. manifestVersion: 1 name: "@swamp/gcp/datalineage" -version: "2026.04.23.1" +version: "2026.05.06.1" description: "Google Cloud datalineage infrastructure models" repository: "https://github.com/systeminit/swamp-extensions" labels: @@ -10,6 +10,8 @@ labels: - datalineage - cloud - infrastructure +releaseNotes: | + - Updated: processes_runs models: - config.ts - processes.ts diff --git a/model/gcp/datamigration/extensions/models/migrationjobs.ts b/model/gcp/datamigration/extensions/models/migrationjobs.ts index 79c9ca653..06246bd2b 100644 --- a/model/gcp/datamigration/extensions/models/migrationjobs.ts +++ b/model/gcp/datamigration/extensions/models/migrationjobs.ts @@ -263,6 +263,15 @@ const GlobalArgsSchema = z.object({ "MAX", ]).describe("Initial dump parallelism level.").optional(), }).describe("Performance configuration definition.").optional(), + postgresHomogeneousConfig: z.object({ + isNativeLogical: z.boolean().describe( + "Required. Whether the migration is native logical.", + ).optional(), + maxAdditionalSubscriptions: z.number().int().describe( + "Optional. 
Maximum number of additional subscriptions to use for the migration job.", + ).optional(), + }).describe("Configuration for PostgreSQL to PostgreSQL migrations.") + .optional(), postgresToSqlserverConfig: z.object({ postgresSourceConfig: z.object({ skipFullDump: z.boolean().describe( @@ -499,6 +508,10 @@ const StateSchema = z.object({ dumpParallelLevel: z.string(), }).optional(), phase: z.string().optional(), + postgresHomogeneousConfig: z.object({ + isNativeLogical: z.boolean(), + maxAdditionalSubscriptions: z.number(), + }).optional(), postgresToSqlserverConfig: z.object({ postgresSourceConfig: z.object({ skipFullDump: z.boolean(), @@ -716,6 +729,15 @@ const InputsSchema = z.object({ "MAX", ]).describe("Initial dump parallelism level.").optional(), }).describe("Performance configuration definition.").optional(), + postgresHomogeneousConfig: z.object({ + isNativeLogical: z.boolean().describe( + "Required. Whether the migration is native logical.", + ).optional(), + maxAdditionalSubscriptions: z.number().int().describe( + "Optional. Maximum number of additional subscriptions to use for the migration job.", + ).optional(), + }).describe("Configuration for PostgreSQL to PostgreSQL migrations.") + .optional(), postgresToSqlserverConfig: z.object({ postgresSourceConfig: z.object({ skipFullDump: z.boolean().describe( @@ -886,7 +908,7 @@ const InputsSchema = z.object({ /** Swamp extension model for Google Cloud Database Migration MigrationJobs. Registered at `@swamp/gcp/datamigration/migrationjobs`. 
*/ export const model = { type: "@swamp/gcp/datamigration/migrationjobs", - version: "2026.04.23.1", + version: "2026.05.06.1", upgrades: [ { toVersion: "2026.04.01.1", @@ -923,6 +945,11 @@ export const model = { description: "No schema changes", upgradeAttributes: (old: Record) => old, }, + { + toVersion: "2026.05.06.1", + description: "Added: postgresHomogeneousConfig", + upgradeAttributes: (old: Record) => old, + }, ], globalArguments: GlobalArgsSchema, inputsSchema: InputsSchema, @@ -983,6 +1010,9 @@ export const model = { if (g["performanceConfig"] !== undefined) { body["performanceConfig"] = g["performanceConfig"]; } + if (g["postgresHomogeneousConfig"] !== undefined) { + body["postgresHomogeneousConfig"] = g["postgresHomogeneousConfig"]; + } if (g["postgresToSqlserverConfig"] !== undefined) { body["postgresToSqlserverConfig"] = g["postgresToSqlserverConfig"]; } @@ -1135,6 +1165,9 @@ export const model = { if (g["performanceConfig"] !== undefined) { body["performanceConfig"] = g["performanceConfig"]; } + if (g["postgresHomogeneousConfig"] !== undefined) { + body["postgresHomogeneousConfig"] = g["postgresHomogeneousConfig"]; + } if (g["postgresToSqlserverConfig"] !== undefined) { body["postgresToSqlserverConfig"] = g["postgresToSqlserverConfig"]; } diff --git a/model/gcp/datamigration/manifest.yaml b/model/gcp/datamigration/manifest.yaml index 3b34fc45a..854f3a730 100644 --- a/model/gcp/datamigration/manifest.yaml +++ b/model/gcp/datamigration/manifest.yaml @@ -1,7 +1,7 @@ # Auto-generated manifest. Re-generate with the appropriate deno task. 
manifestVersion: 1 name: "@swamp/gcp/datamigration" -version: "2026.04.23.1" +version: "2026.05.06.1" description: "Google Cloud datamigration infrastructure models" repository: "https://github.com/systeminit/swamp-extensions" labels: @@ -10,6 +10,8 @@ labels: - datamigration - cloud - infrastructure +releaseNotes: | + - Updated: migrationjobs models: - connectionprofiles.ts - conversionworkspaces.ts diff --git a/model/gcp/dataproc/extensions/models/clusters.ts b/model/gcp/dataproc/extensions/models/clusters.ts index e3407ebfd..205486c99 100644 --- a/model/gcp/dataproc/extensions/models/clusters.ts +++ b/model/gcp/dataproc/extensions/models/clusters.ts @@ -234,7 +234,7 @@ const GlobalArgsSchema = z.object({ "ZERO_SCALE", ]).describe("Optional. The type of the cluster.").optional(), configBucket: z.string().describe( - "Optional. A Cloud Storage bucket used to stage job dependencies, config files, and job driver console output. If you do not specify a staging bucket, Cloud Dataproc will determine a Cloud Storage location (US, ASIA, or EU) for your cluster's staging bucket according to the Compute Engine zone where your cluster is deployed, and then create and manage this project-level, per-location bucket (see Dataproc staging and temp buckets (https://cloud.google.com/dataproc/docs/concepts/configuring-clusters/staging-bucket)). This field requires a Cloud Storage bucket name, not a gs://... URI to a Cloud Storage bucket.", + "Optional. A Cloud Storage bucket used to stage job dependencies, config files, and job driver console output. If you do not specify a staging bucket, Dataproc determines a Cloud Storage location (US, ASIA, or EU) for the cluster staging bucket according to the Compute Engine zone where the cluster is deployed, and then creates and manages this project-level, per-location bucket (see Dataproc staging and temp buckets (https://cloud.google.com/dataproc/docs/concepts/configuring-clusters/staging-bucket)). 
This field requires a Cloud Storage bucket name, not a gs://... URI to a Cloud Storage bucket.", ).optional(), dataprocMetricConfig: z.object({ metrics: z.array(z.object({ @@ -284,7 +284,7 @@ const GlobalArgsSchema = z.object({ ).optional(), confidentialInstanceConfig: z.object({ enableConfidentialCompute: z.boolean().describe( - "Optional. Defines whether the instance should have confidential compute enabled.", + "Optional. Deprecated: Use 'confidential_instance_type' instead. Defines whether the instance should have confidential compute enabled.", ).optional(), }).describe( "Confidential Instance Config for clusters using Confidential VMs (https://cloud.google.com/compute/confidential-vm/docs)", @@ -296,7 +296,7 @@ const GlobalArgsSchema = z.object({ "Optional. The Compute Engine metadata entries to add to all instances (see Project and instance metadata (https://cloud.google.com/compute/docs/storing-retrieving-metadata#project_and_instance_metadata)).", ).optional(), networkUri: z.string().describe( - 'Optional. The Compute Engine network to be used for machine communications. Cannot be specified with subnetwork_uri. If neither network_uri nor subnetwork_uri is specified, the "default" network of the project is used, if it exists. Cannot be a "Custom Subnet Network" (see Using Subnetworks (https://cloud.google.com/compute/docs/subnetworks) for more information).A full URL, partial URI, or short name are valid. Examples: https://www.googleapis.com/compute/v1/projects/[project_id]/global/networks/default projects/[project_id]/global/networks/default default', + 'Optional. The Compute Engine network to be used for machine communications. Cannot be specified with subnetwork_uri. If neither network_uri nor subnetwork_uri is specified, the "default" network of the project is used, if it exists. 
Cannot be a Custom Subnet Network (see Using Subnetworks (https://cloud.google.com/compute/docs/subnetworks) for more information).A full URL, partial URI, or short name are valid. Examples: https://www.googleapis.com/compute/v1/projects/[project_id]/global/networks/default projects/[project_id]/global/networks/default default', ).optional(), nodeGroupAffinity: z.object({ nodeGroupUri: z.string().describe( @@ -468,10 +468,10 @@ const GlobalArgsSchema = z.object({ "Optional. Size in GB of the boot disk (default is 500GB).", ).optional(), bootDiskType: z.string().describe( - 'Optional. Type of the boot disk (default is "pd-standard"). Valid values: "pd-balanced" (Persistent Disk Balanced Solid State Drive), "pd-ssd" (Persistent Disk Solid State Drive), or "pd-standard" (Persistent Disk Hard Disk Drive). See Disk types (https://cloud.google.com/compute/docs/disks#disk-types).', + "Optional. Type of the boot disk (default is pd-standard). Valid values: pd-balanced (Persistent Disk Balanced Solid State Drive), pd-ssd (Persistent Disk Solid State Drive), or pd-standard (Persistent Disk Hard Disk Drive). See Disk types (https://cloud.google.com/compute/docs/disks#disk-types).", ).optional(), localSsdInterface: z.string().describe( - 'Optional. Interface type of local SSDs (default is "scsi"). Valid values: "scsi" (Small Computer System Interface), "nvme" (Non-Volatile Memory Express). See local SSD performance (https://cloud.google.com/compute/docs/disks/local-ssd#performance).', + "Optional. Interface type of local SSDs (default is scsi). Valid values: scsi (Small Computer System Interface), nvme (Non-Volatile Memory Express). See local SSD performance (https://cloud.google.com/compute/docs/disks/local-ssd#performance).", ).optional(), numLocalSsds: z.number().int().describe( "Optional. Number of attached SSDs, from 0 to 8 (default is 0). 
If SSDs are not attached, the boot disk is used to store runtime logs and HDFS (https://hadoop.apache.org/docs/r1.2.1/hdfs_user_guide.html) data. If one or more SSDs are attached, this runtime bulk data is spread across them, and the boot disk contains only basic config and installed binaries.Note: Local SSD options may vary by machine type and number of vCPUs selected.", @@ -624,10 +624,10 @@ const GlobalArgsSchema = z.object({ "Optional. Size in GB of the boot disk (default is 500GB).", ).optional(), bootDiskType: z.string().describe( - 'Optional. Type of the boot disk (default is "pd-standard"). Valid values: "pd-balanced" (Persistent Disk Balanced Solid State Drive), "pd-ssd" (Persistent Disk Solid State Drive), or "pd-standard" (Persistent Disk Hard Disk Drive). See Disk types (https://cloud.google.com/compute/docs/disks#disk-types).', + "Optional. Type of the boot disk (default is pd-standard). Valid values: pd-balanced (Persistent Disk Balanced Solid State Drive), pd-ssd (Persistent Disk Solid State Drive), or pd-standard (Persistent Disk Hard Disk Drive). See Disk types (https://cloud.google.com/compute/docs/disks#disk-types).", ).optional(), localSsdInterface: z.string().describe( - 'Optional. Interface type of local SSDs (default is "scsi"). Valid values: "scsi" (Small Computer System Interface), "nvme" (Non-Volatile Memory Express). See local SSD performance (https://cloud.google.com/compute/docs/disks/local-ssd#performance).', + "Optional. Interface type of local SSDs (default is scsi). Valid values: scsi (Small Computer System Interface), nvme (Non-Volatile Memory Express). See local SSD performance (https://cloud.google.com/compute/docs/disks/local-ssd#performance).", ).optional(), numLocalSsds: z.number().int().describe( "Optional. Number of attached SSDs, from 0 to 8 (default is 0). If SSDs are not attached, the boot disk is used to store runtime logs and HDFS (https://hadoop.apache.org/docs/r1.2.1/hdfs_user_guide.html) data. 
If one or more SSDs are attached, this runtime bulk data is spread across them, and the boot disk contains only basic config and installed binaries.Note: Local SSD options may vary by machine type and number of vCPUs selected.", @@ -833,7 +833,7 @@ const GlobalArgsSchema = z.object({ "Specifies the selection and config of software inside the cluster.", ).optional(), tempBucket: z.string().describe( - "Optional. A Cloud Storage bucket used to store ephemeral cluster and jobs data, such as Spark and MapReduce history files. If you do not specify a temp bucket, Dataproc will determine a Cloud Storage location (US, ASIA, or EU) for your cluster's temp bucket according to the Compute Engine zone where your cluster is deployed, and then create and manage this project-level, per-location bucket. The default bucket has a TTL of 90 days, but you can use any TTL (or none) if you specify a bucket (see Dataproc staging and temp buckets (https://cloud.google.com/dataproc/docs/concepts/configuring-clusters/staging-bucket)). This field requires a Cloud Storage bucket name, not a gs://... URI to a Cloud Storage bucket.", + "Optional. A Cloud Storage bucket used to store ephemeral cluster and jobs data, such as Spark and MapReduce history files. If you do not specify a temp bucket, Dataproc determines a Cloud Storage location (US, ASIA, or EU) for the cluster temp bucket according to the Compute Engine zone where the cluster is deployed, and then creates and manages this project-level, per-location bucket. The default bucket has a TTL of 90 days, but you can use any TTL (or none) if you specify a bucket (see Dataproc staging and temp buckets (https://cloud.google.com/dataproc/docs/concepts/configuring-clusters/staging-bucket)). This field requires a Cloud Storage bucket name, not a gs://... URI to a Cloud Storage bucket.", ).optional(), workerConfig: z.object({ accelerators: z.array(z.object({ @@ -870,10 +870,10 @@ const GlobalArgsSchema = z.object({ "Optional. 
Size in GB of the boot disk (default is 500GB).", ).optional(), bootDiskType: z.string().describe( - 'Optional. Type of the boot disk (default is "pd-standard"). Valid values: "pd-balanced" (Persistent Disk Balanced Solid State Drive), "pd-ssd" (Persistent Disk Solid State Drive), or "pd-standard" (Persistent Disk Hard Disk Drive). See Disk types (https://cloud.google.com/compute/docs/disks#disk-types).', + "Optional. Type of the boot disk (default is pd-standard). Valid values: pd-balanced (Persistent Disk Balanced Solid State Drive), pd-ssd (Persistent Disk Solid State Drive), or pd-standard (Persistent Disk Hard Disk Drive). See Disk types (https://cloud.google.com/compute/docs/disks#disk-types).", ).optional(), localSsdInterface: z.string().describe( - 'Optional. Interface type of local SSDs (default is "scsi"). Valid values: "scsi" (Small Computer System Interface), "nvme" (Non-Volatile Memory Express). See local SSD performance (https://cloud.google.com/compute/docs/disks/local-ssd#performance).', + "Optional. Interface type of local SSDs (default is scsi). Valid values: scsi (Small Computer System Interface), nvme (Non-Volatile Memory Express). See local SSD performance (https://cloud.google.com/compute/docs/disks/local-ssd#performance).", ).optional(), numLocalSsds: z.number().int().describe( "Optional. Number of attached SSDs, from 0 to 8 (default is 0). If SSDs are not attached, the boot disk is used to store runtime logs and HDFS (https://hadoop.apache.org/docs/r1.2.1/hdfs_user_guide.html) data. If one or more SSDs are attached, this runtime bulk data is spread across them, and the boot disk contains only basic config and installed binaries.Note: Local SSD options may vary by machine type and number of vCPUs selected.", @@ -1086,7 +1086,7 @@ const GlobalArgsSchema = z.object({ "The configuration for running the Dataproc cluster on Kubernetes.", ).optional(), stagingBucket: z.string().describe( - "Optional. 
A Cloud Storage bucket used to stage job dependencies, config files, and job driver console output. If you do not specify a staging bucket, Cloud Dataproc will determine a Cloud Storage location (US, ASIA, or EU) for your cluster's staging bucket according to the Compute Engine zone where your cluster is deployed, and then create and manage this project-level, per-location bucket (see Dataproc staging and temp buckets (https://cloud.google.com/dataproc/docs/concepts/configuring-clusters/staging-bucket)). This field requires a Cloud Storage bucket name, not a gs://... URI to a Cloud Storage bucket.", + "Optional. A Cloud Storage bucket used to stage job dependencies, config files, and job driver console output. If you do not specify a staging bucket, Dataproc determines a Cloud Storage location (US, ASIA, or EU) for your cluster's staging bucket according to the Compute Engine zone where your cluster is deployed, and then create and manage this project-level, per-location bucket (see Dataproc staging and temp buckets (https://cloud.google.com/dataproc/docs/concepts/configuring-clusters/staging-bucket)). This field requires a Cloud Storage bucket name, not a gs://... URI to a Cloud Storage bucket.", ).optional(), }).describe( "The Dataproc cluster config for a cluster that does not directly control the underlying compute resources, such as a Dataproc-on-GKE cluster (https://cloud.google.com/dataproc/docs/guides/dpgke/dataproc-gke-overview).", @@ -1552,7 +1552,7 @@ const InputsSchema = z.object({ "ZERO_SCALE", ]).describe("Optional. The type of the cluster.").optional(), configBucket: z.string().describe( - "Optional. A Cloud Storage bucket used to stage job dependencies, config files, and job driver console output. 
If you do not specify a staging bucket, Cloud Dataproc will determine a Cloud Storage location (US, ASIA, or EU) for your cluster's staging bucket according to the Compute Engine zone where your cluster is deployed, and then create and manage this project-level, per-location bucket (see Dataproc staging and temp buckets (https://cloud.google.com/dataproc/docs/concepts/configuring-clusters/staging-bucket)). This field requires a Cloud Storage bucket name, not a gs://... URI to a Cloud Storage bucket.", + "Optional. A Cloud Storage bucket used to stage job dependencies, config files, and job driver console output. If you do not specify a staging bucket, Dataproc determines a Cloud Storage location (US, ASIA, or EU) for the cluster staging bucket according to the Compute Engine zone where the cluster is deployed, and then creates and manages this project-level, per-location bucket (see Dataproc staging and temp buckets (https://cloud.google.com/dataproc/docs/concepts/configuring-clusters/staging-bucket)). This field requires a Cloud Storage bucket name, not a gs://... URI to a Cloud Storage bucket.", ).optional(), dataprocMetricConfig: z.object({ metrics: z.array(z.object({ @@ -1602,7 +1602,7 @@ const InputsSchema = z.object({ ).optional(), confidentialInstanceConfig: z.object({ enableConfidentialCompute: z.boolean().describe( - "Optional. Defines whether the instance should have confidential compute enabled.", + "Optional. Deprecated: Use 'confidential_instance_type' instead. Defines whether the instance should have confidential compute enabled.", ).optional(), }).describe( "Confidential Instance Config for clusters using Confidential VMs (https://cloud.google.com/compute/confidential-vm/docs)", @@ -1614,7 +1614,7 @@ const InputsSchema = z.object({ "Optional. 
The Compute Engine metadata entries to add to all instances (see Project and instance metadata (https://cloud.google.com/compute/docs/storing-retrieving-metadata#project_and_instance_metadata)).", ).optional(), networkUri: z.string().describe( - 'Optional. The Compute Engine network to be used for machine communications. Cannot be specified with subnetwork_uri. If neither network_uri nor subnetwork_uri is specified, the "default" network of the project is used, if it exists. Cannot be a "Custom Subnet Network" (see Using Subnetworks (https://cloud.google.com/compute/docs/subnetworks) for more information).A full URL, partial URI, or short name are valid. Examples: https://www.googleapis.com/compute/v1/projects/[project_id]/global/networks/default projects/[project_id]/global/networks/default default', + 'Optional. The Compute Engine network to be used for machine communications. Cannot be specified with subnetwork_uri. If neither network_uri nor subnetwork_uri is specified, the "default" network of the project is used, if it exists. Cannot be a Custom Subnet Network (see Using Subnetworks (https://cloud.google.com/compute/docs/subnetworks) for more information).A full URL, partial URI, or short name are valid. Examples: https://www.googleapis.com/compute/v1/projects/[project_id]/global/networks/default projects/[project_id]/global/networks/default default', ).optional(), nodeGroupAffinity: z.object({ nodeGroupUri: z.string().describe( @@ -1786,10 +1786,10 @@ const InputsSchema = z.object({ "Optional. Size in GB of the boot disk (default is 500GB).", ).optional(), bootDiskType: z.string().describe( - 'Optional. Type of the boot disk (default is "pd-standard"). Valid values: "pd-balanced" (Persistent Disk Balanced Solid State Drive), "pd-ssd" (Persistent Disk Solid State Drive), or "pd-standard" (Persistent Disk Hard Disk Drive). See Disk types (https://cloud.google.com/compute/docs/disks#disk-types).', + "Optional. Type of the boot disk (default is pd-standard). 
Valid values: pd-balanced (Persistent Disk Balanced Solid State Drive), pd-ssd (Persistent Disk Solid State Drive), or pd-standard (Persistent Disk Hard Disk Drive). See Disk types (https://cloud.google.com/compute/docs/disks#disk-types).", ).optional(), localSsdInterface: z.string().describe( - 'Optional. Interface type of local SSDs (default is "scsi"). Valid values: "scsi" (Small Computer System Interface), "nvme" (Non-Volatile Memory Express). See local SSD performance (https://cloud.google.com/compute/docs/disks/local-ssd#performance).', + "Optional. Interface type of local SSDs (default is scsi). Valid values: scsi (Small Computer System Interface), nvme (Non-Volatile Memory Express). See local SSD performance (https://cloud.google.com/compute/docs/disks/local-ssd#performance).", ).optional(), numLocalSsds: z.number().int().describe( "Optional. Number of attached SSDs, from 0 to 8 (default is 0). If SSDs are not attached, the boot disk is used to store runtime logs and HDFS (https://hadoop.apache.org/docs/r1.2.1/hdfs_user_guide.html) data. If one or more SSDs are attached, this runtime bulk data is spread across them, and the boot disk contains only basic config and installed binaries.Note: Local SSD options may vary by machine type and number of vCPUs selected.", @@ -1942,10 +1942,10 @@ const InputsSchema = z.object({ "Optional. Size in GB of the boot disk (default is 500GB).", ).optional(), bootDiskType: z.string().describe( - 'Optional. Type of the boot disk (default is "pd-standard"). Valid values: "pd-balanced" (Persistent Disk Balanced Solid State Drive), "pd-ssd" (Persistent Disk Solid State Drive), or "pd-standard" (Persistent Disk Hard Disk Drive). See Disk types (https://cloud.google.com/compute/docs/disks#disk-types).', + "Optional. Type of the boot disk (default is pd-standard). Valid values: pd-balanced (Persistent Disk Balanced Solid State Drive), pd-ssd (Persistent Disk Solid State Drive), or pd-standard (Persistent Disk Hard Disk Drive). 
See Disk types (https://cloud.google.com/compute/docs/disks#disk-types).", ).optional(), localSsdInterface: z.string().describe( - 'Optional. Interface type of local SSDs (default is "scsi"). Valid values: "scsi" (Small Computer System Interface), "nvme" (Non-Volatile Memory Express). See local SSD performance (https://cloud.google.com/compute/docs/disks/local-ssd#performance).', + "Optional. Interface type of local SSDs (default is scsi). Valid values: scsi (Small Computer System Interface), nvme (Non-Volatile Memory Express). See local SSD performance (https://cloud.google.com/compute/docs/disks/local-ssd#performance).", ).optional(), numLocalSsds: z.number().int().describe( "Optional. Number of attached SSDs, from 0 to 8 (default is 0). If SSDs are not attached, the boot disk is used to store runtime logs and HDFS (https://hadoop.apache.org/docs/r1.2.1/hdfs_user_guide.html) data. If one or more SSDs are attached, this runtime bulk data is spread across them, and the boot disk contains only basic config and installed binaries.Note: Local SSD options may vary by machine type and number of vCPUs selected.", @@ -2151,7 +2151,7 @@ const InputsSchema = z.object({ "Specifies the selection and config of software inside the cluster.", ).optional(), tempBucket: z.string().describe( - "Optional. A Cloud Storage bucket used to store ephemeral cluster and jobs data, such as Spark and MapReduce history files. If you do not specify a temp bucket, Dataproc will determine a Cloud Storage location (US, ASIA, or EU) for your cluster's temp bucket according to the Compute Engine zone where your cluster is deployed, and then create and manage this project-level, per-location bucket. The default bucket has a TTL of 90 days, but you can use any TTL (or none) if you specify a bucket (see Dataproc staging and temp buckets (https://cloud.google.com/dataproc/docs/concepts/configuring-clusters/staging-bucket)). This field requires a Cloud Storage bucket name, not a gs://... 
URI to a Cloud Storage bucket.", + "Optional. A Cloud Storage bucket used to store ephemeral cluster and jobs data, such as Spark and MapReduce history files. If you do not specify a temp bucket, Dataproc determines a Cloud Storage location (US, ASIA, or EU) for the cluster temp bucket according to the Compute Engine zone where the cluster is deployed, and then creates and manages this project-level, per-location bucket. The default bucket has a TTL of 90 days, but you can use any TTL (or none) if you specify a bucket (see Dataproc staging and temp buckets (https://cloud.google.com/dataproc/docs/concepts/configuring-clusters/staging-bucket)). This field requires a Cloud Storage bucket name, not a gs://... URI to a Cloud Storage bucket.", ).optional(), workerConfig: z.object({ accelerators: z.array(z.object({ @@ -2188,10 +2188,10 @@ const InputsSchema = z.object({ "Optional. Size in GB of the boot disk (default is 500GB).", ).optional(), bootDiskType: z.string().describe( - 'Optional. Type of the boot disk (default is "pd-standard"). Valid values: "pd-balanced" (Persistent Disk Balanced Solid State Drive), "pd-ssd" (Persistent Disk Solid State Drive), or "pd-standard" (Persistent Disk Hard Disk Drive). See Disk types (https://cloud.google.com/compute/docs/disks#disk-types).', + "Optional. Type of the boot disk (default is pd-standard). Valid values: pd-balanced (Persistent Disk Balanced Solid State Drive), pd-ssd (Persistent Disk Solid State Drive), or pd-standard (Persistent Disk Hard Disk Drive). See Disk types (https://cloud.google.com/compute/docs/disks#disk-types).", ).optional(), localSsdInterface: z.string().describe( - 'Optional. Interface type of local SSDs (default is "scsi"). Valid values: "scsi" (Small Computer System Interface), "nvme" (Non-Volatile Memory Express). See local SSD performance (https://cloud.google.com/compute/docs/disks/local-ssd#performance).', + "Optional. Interface type of local SSDs (default is scsi). 
Valid values: scsi (Small Computer System Interface), nvme (Non-Volatile Memory Express). See local SSD performance (https://cloud.google.com/compute/docs/disks/local-ssd#performance).", ).optional(), numLocalSsds: z.number().int().describe( "Optional. Number of attached SSDs, from 0 to 8 (default is 0). If SSDs are not attached, the boot disk is used to store runtime logs and HDFS (https://hadoop.apache.org/docs/r1.2.1/hdfs_user_guide.html) data. If one or more SSDs are attached, this runtime bulk data is spread across them, and the boot disk contains only basic config and installed binaries.Note: Local SSD options may vary by machine type and number of vCPUs selected.", ).optional(), @@ -2404,7 +2404,7 @@ const InputsSchema = z.object({ "The configuration for running the Dataproc cluster on Kubernetes.", ).optional(), stagingBucket: z.string().describe( - "Optional. A Cloud Storage bucket used to stage job dependencies, config files, and job driver console output. If you do not specify a staging bucket, Cloud Dataproc will determine a Cloud Storage location (US, ASIA, or EU) for your cluster's staging bucket according to the Compute Engine zone where your cluster is deployed, and then create and manage this project-level, per-location bucket (see Dataproc staging and temp buckets (https://cloud.google.com/dataproc/docs/concepts/configuring-clusters/staging-bucket)). This field requires a Cloud Storage bucket name, not a gs://... URI to a Cloud Storage bucket.", + "Optional. A Cloud Storage bucket used to stage job dependencies, config files, and job driver console output. If you do not specify a staging bucket, Dataproc determines a Cloud Storage location (US, ASIA, or EU) for the cluster staging bucket according to the Compute Engine zone where the cluster is deployed, and then creates and manages this project-level, per-location bucket (see Dataproc staging and temp buckets (https://cloud.google.com/dataproc/docs/concepts/configuring-clusters/staging-bucket)). 
This field requires a Cloud Storage bucket name, not a gs://... URI to a Cloud Storage bucket.", ).optional(), }).describe( "The Dataproc cluster config for a cluster that does not directly control the underlying compute resources, such as a Dataproc-on-GKE cluster (https://cloud.google.com/dataproc/docs/guides/dpgke/dataproc-gke-overview).", @@ -2423,7 +2423,7 @@ const InputsSchema = z.object({ /** Swamp extension model for Google Cloud Dataproc Clusters. Registered at `@swamp/gcp/dataproc/clusters`. */ export const model = { type: "@swamp/gcp/dataproc/clusters", - version: "2026.04.23.1", + version: "2026.05.06.1", upgrades: [ { toVersion: "2026.04.01.1", @@ -2470,6 +2470,11 @@ export const model = { description: "No schema changes", upgradeAttributes: (old: Record) => old, }, + { + toVersion: "2026.05.06.1", + description: "No schema changes", + upgradeAttributes: (old: Record) => old, + }, ], globalArguments: GlobalArgsSchema, inputsSchema: InputsSchema, diff --git a/model/gcp/dataproc/extensions/models/clusters_nodegroups.ts b/model/gcp/dataproc/extensions/models/clusters_nodegroups.ts index 8208eab32..105887a00 100644 --- a/model/gcp/dataproc/extensions/models/clusters_nodegroups.ts +++ b/model/gcp/dataproc/extensions/models/clusters_nodegroups.ts @@ -116,10 +116,10 @@ const GlobalArgsSchema = z.object({ "Optional. Size in GB of the boot disk (default is 500GB).", ).optional(), bootDiskType: z.string().describe( - 'Optional. Type of the boot disk (default is "pd-standard"). Valid values: "pd-balanced" (Persistent Disk Balanced Solid State Drive), "pd-ssd" (Persistent Disk Solid State Drive), or "pd-standard" (Persistent Disk Hard Disk Drive). See Disk types (https://cloud.google.com/compute/docs/disks#disk-types).', + "Optional. Type of the boot disk (default is pd-standard). Valid values: pd-balanced (Persistent Disk Balanced Solid State Drive), pd-ssd (Persistent Disk Solid State Drive), or pd-standard (Persistent Disk Hard Disk Drive). 
See Disk types (https://cloud.google.com/compute/docs/disks#disk-types).", ).optional(), localSsdInterface: z.string().describe( - 'Optional. Interface type of local SSDs (default is "scsi"). Valid values: "scsi" (Small Computer System Interface), "nvme" (Non-Volatile Memory Express). See local SSD performance (https://cloud.google.com/compute/docs/disks/local-ssd#performance).', + "Optional. Interface type of local SSDs (default is scsi). Valid values: scsi (Small Computer System Interface), nvme (Non-Volatile Memory Express). See local SSD performance (https://cloud.google.com/compute/docs/disks/local-ssd#performance).", ).optional(), numLocalSsds: z.number().int().describe( "Optional. Number of attached SSDs, from 0 to 8 (default is 0). If SSDs are not attached, the boot disk is used to store runtime logs and HDFS (https://hadoop.apache.org/docs/r1.2.1/hdfs_user_guide.html) data. If one or more SSDs are attached, this runtime bulk data is spread across them, and the boot disk contains only basic config and installed binaries.Note: Local SSD options may vary by machine type and number of vCPUs selected.", @@ -361,10 +361,10 @@ const InputsSchema = z.object({ "Optional. Size in GB of the boot disk (default is 500GB).", ).optional(), bootDiskType: z.string().describe( - 'Optional. Type of the boot disk (default is "pd-standard"). Valid values: "pd-balanced" (Persistent Disk Balanced Solid State Drive), "pd-ssd" (Persistent Disk Solid State Drive), or "pd-standard" (Persistent Disk Hard Disk Drive). See Disk types (https://cloud.google.com/compute/docs/disks#disk-types).', + "Optional. Type of the boot disk (default is pd-standard). Valid values: pd-balanced (Persistent Disk Balanced Solid State Drive), pd-ssd (Persistent Disk Solid State Drive), or pd-standard (Persistent Disk Hard Disk Drive). See Disk types (https://cloud.google.com/compute/docs/disks#disk-types).", ).optional(), localSsdInterface: z.string().describe( - 'Optional. 
Interface type of local SSDs (default is "scsi"). Valid values: "scsi" (Small Computer System Interface), "nvme" (Non-Volatile Memory Express). See local SSD performance (https://cloud.google.com/compute/docs/disks/local-ssd#performance).', + "Optional. Interface type of local SSDs (default is scsi). Valid values: scsi (Small Computer System Interface), nvme (Non-Volatile Memory Express). See local SSD performance (https://cloud.google.com/compute/docs/disks/local-ssd#performance).", ).optional(), numLocalSsds: z.number().int().describe( "Optional. Number of attached SSDs, from 0 to 8 (default is 0). If SSDs are not attached, the boot disk is used to store runtime logs and HDFS (https://hadoop.apache.org/docs/r1.2.1/hdfs_user_guide.html) data. If one or more SSDs are attached, this runtime bulk data is spread across them, and the boot disk contains only basic config and installed binaries.Note: Local SSD options may vary by machine type and number of vCPUs selected.", @@ -496,7 +496,7 @@ const InputsSchema = z.object({ /** Swamp extension model for Google Cloud Dataproc Clusters.NodeGroups. Registered at `@swamp/gcp/dataproc/clusters-nodegroups`. 
*/ export const model = { type: "@swamp/gcp/dataproc/clusters-nodegroups", - version: "2026.04.23.1", + version: "2026.05.06.1", upgrades: [ { toVersion: "2026.04.01.1", @@ -533,6 +533,11 @@ export const model = { description: "No schema changes", upgradeAttributes: (old: Record) => old, }, + { + toVersion: "2026.05.06.1", + description: "No schema changes", + upgradeAttributes: (old: Record) => old, + }, ], globalArguments: GlobalArgsSchema, inputsSchema: InputsSchema, diff --git a/model/gcp/dataproc/extensions/models/workflowtemplates.ts b/model/gcp/dataproc/extensions/models/workflowtemplates.ts index f1b38304a..d82539b4e 100644 --- a/model/gcp/dataproc/extensions/models/workflowtemplates.ts +++ b/model/gcp/dataproc/extensions/models/workflowtemplates.ts @@ -479,7 +479,7 @@ const GlobalArgsSchema = z.object({ "ZERO_SCALE", ]).describe("Optional. The type of the cluster.").optional(), configBucket: z.string().describe( - "Optional. A Cloud Storage bucket used to stage job dependencies, config files, and job driver console output. If you do not specify a staging bucket, Cloud Dataproc will determine a Cloud Storage location (US, ASIA, or EU) for your cluster's staging bucket according to the Compute Engine zone where your cluster is deployed, and then create and manage this project-level, per-location bucket (see Dataproc staging and temp buckets (https://cloud.google.com/dataproc/docs/concepts/configuring-clusters/staging-bucket)). This field requires a Cloud Storage bucket name, not a gs://... URI to a Cloud Storage bucket.", + "Optional. A Cloud Storage bucket used to stage job dependencies, config files, and job driver console output. 
If you do not specify a staging bucket, Dataproc determines a Cloud Storage location (US, ASIA, or EU) for the cluster staging bucket according to the Compute Engine zone where the cluster is deployed, and then creates and manages this project-level, per-location bucket (see Dataproc staging and temp buckets (https://cloud.google.com/dataproc/docs/concepts/configuring-clusters/staging-bucket)). This field requires a Cloud Storage bucket name, not a gs://... URI to a Cloud Storage bucket.", ).optional(), dataprocMetricConfig: z.object({ metrics: z.array(z.unknown()).describe( @@ -514,7 +514,7 @@ const GlobalArgsSchema = z.object({ ).optional(), confidentialInstanceConfig: z.object({ enableConfidentialCompute: z.unknown().describe( - "Optional. Defines whether the instance should have confidential compute enabled.", + "Optional. Deprecated: Use 'confidential_instance_type' instead. Defines whether the instance should have confidential compute enabled.", ).optional(), }).describe( "Confidential Instance Config for clusters using Confidential VMs (https://cloud.google.com/compute/confidential-vm/docs)", @@ -526,7 +526,7 @@ const GlobalArgsSchema = z.object({ "Optional. The Compute Engine metadata entries to add to all instances (see Project and instance metadata (https://cloud.google.com/compute/docs/storing-retrieving-metadata#project_and_instance_metadata)).", ).optional(), networkUri: z.string().describe( - 'Optional. The Compute Engine network to be used for machine communications. Cannot be specified with subnetwork_uri. If neither network_uri nor subnetwork_uri is specified, the "default" network of the project is used, if it exists. Cannot be a "Custom Subnet Network" (see Using Subnetworks (https://cloud.google.com/compute/docs/subnetworks) for more information).A full URL, partial URI, or short name are valid. 
Examples: https://www.googleapis.com/compute/v1/projects/[project_id]/global/networks/default projects/[project_id]/global/networks/default default', + 'Optional. The Compute Engine network to be used for machine communications. Cannot be specified with subnetwork_uri. If neither network_uri nor subnetwork_uri is specified, the "default" network of the project is used, if it exists. Cannot be a Custom Subnet Network (see Using Subnetworks (https://cloud.google.com/compute/docs/subnetworks) for more information).A full URL, partial URI, or short name are valid. Examples: https://www.googleapis.com/compute/v1/projects/[project_id]/global/networks/default projects/[project_id]/global/networks/default default', ).optional(), nodeGroupAffinity: z.object({ nodeGroupUri: z.unknown().describe( @@ -658,10 +658,10 @@ const GlobalArgsSchema = z.object({ "Optional. Size in GB of the boot disk (default is 500GB).", ).optional(), bootDiskType: z.unknown().describe( - 'Optional. Type of the boot disk (default is "pd-standard"). Valid values: "pd-balanced" (Persistent Disk Balanced Solid State Drive), "pd-ssd" (Persistent Disk Solid State Drive), or "pd-standard" (Persistent Disk Hard Disk Drive). See Disk types (https://cloud.google.com/compute/docs/disks#disk-types).', + "Optional. Type of the boot disk (default is pd-standard). Valid values: pd-balanced (Persistent Disk Balanced Solid State Drive), pd-ssd (Persistent Disk Solid State Drive), or pd-standard (Persistent Disk Hard Disk Drive). See Disk types (https://cloud.google.com/compute/docs/disks#disk-types).", ).optional(), localSsdInterface: z.unknown().describe( - 'Optional. Interface type of local SSDs (default is "scsi"). Valid values: "scsi" (Small Computer System Interface), "nvme" (Non-Volatile Memory Express). See local SSD performance (https://cloud.google.com/compute/docs/disks/local-ssd#performance).', + "Optional. Interface type of local SSDs (default is scsi). 
Valid values: scsi (Small Computer System Interface), nvme (Non-Volatile Memory Express). See local SSD performance (https://cloud.google.com/compute/docs/disks/local-ssd#performance).", ).optional(), numLocalSsds: z.unknown().describe( "Optional. Number of attached SSDs, from 0 to 8 (default is 0). If SSDs are not attached, the boot disk is used to store runtime logs and HDFS (https://hadoop.apache.org/docs/r1.2.1/hdfs_user_guide.html) data. If one or more SSDs are attached, this runtime bulk data is spread across them, and the boot disk contains only basic config and installed binaries.Note: Local SSD options may vary by machine type and number of vCPUs selected.", @@ -763,10 +763,10 @@ const GlobalArgsSchema = z.object({ "Optional. Size in GB of the boot disk (default is 500GB).", ).optional(), bootDiskType: z.unknown().describe( - 'Optional. Type of the boot disk (default is "pd-standard"). Valid values: "pd-balanced" (Persistent Disk Balanced Solid State Drive), "pd-ssd" (Persistent Disk Solid State Drive), or "pd-standard" (Persistent Disk Hard Disk Drive). See Disk types (https://cloud.google.com/compute/docs/disks#disk-types).', + "Optional. Type of the boot disk (default is pd-standard). Valid values: pd-balanced (Persistent Disk Balanced Solid State Drive), pd-ssd (Persistent Disk Solid State Drive), or pd-standard (Persistent Disk Hard Disk Drive). See Disk types (https://cloud.google.com/compute/docs/disks#disk-types).", ).optional(), localSsdInterface: z.unknown().describe( - 'Optional. Interface type of local SSDs (default is "scsi"). Valid values: "scsi" (Small Computer System Interface), "nvme" (Non-Volatile Memory Express). See local SSD performance (https://cloud.google.com/compute/docs/disks/local-ssd#performance).', + "Optional. Interface type of local SSDs (default is scsi). Valid values: scsi (Small Computer System Interface), nvme (Non-Volatile Memory Express). 
See local SSD performance (https://cloud.google.com/compute/docs/disks/local-ssd#performance).", ).optional(), numLocalSsds: z.unknown().describe( "Optional. Number of attached SSDs, from 0 to 8 (default is 0). If SSDs are not attached, the boot disk is used to store runtime logs and HDFS (https://hadoop.apache.org/docs/r1.2.1/hdfs_user_guide.html) data. If one or more SSDs are attached, this runtime bulk data is spread across them, and the boot disk contains only basic config and installed binaries.Note: Local SSD options may vary by machine type and number of vCPUs selected.", @@ -917,7 +917,7 @@ const GlobalArgsSchema = z.object({ "Specifies the selection and config of software inside the cluster.", ).optional(), tempBucket: z.string().describe( - "Optional. A Cloud Storage bucket used to store ephemeral cluster and jobs data, such as Spark and MapReduce history files. If you do not specify a temp bucket, Dataproc will determine a Cloud Storage location (US, ASIA, or EU) for your cluster's temp bucket according to the Compute Engine zone where your cluster is deployed, and then create and manage this project-level, per-location bucket. The default bucket has a TTL of 90 days, but you can use any TTL (or none) if you specify a bucket (see Dataproc staging and temp buckets (https://cloud.google.com/dataproc/docs/concepts/configuring-clusters/staging-bucket)). This field requires a Cloud Storage bucket name, not a gs://... URI to a Cloud Storage bucket.", + "Optional. A Cloud Storage bucket used to store ephemeral cluster and jobs data, such as Spark and MapReduce history files. If you do not specify a temp bucket, Dataproc determines a Cloud Storage location (US, ASIA, or EU) for the cluster temp bucket according to the Compute Engine zone where the cluster is deployed, and then creates and manages this project-level, per-location bucket. 
The default bucket has a TTL of 90 days, but you can use any TTL (or none) if you specify a bucket (see Dataproc staging and temp buckets (https://cloud.google.com/dataproc/docs/concepts/configuring-clusters/staging-bucket)). This field requires a Cloud Storage bucket name, not a gs://... URI to a Cloud Storage bucket.", ).optional(), workerConfig: z.object({ accelerators: z.array(z.unknown()).describe( @@ -937,10 +937,10 @@ const GlobalArgsSchema = z.object({ "Optional. Size in GB of the boot disk (default is 500GB).", ).optional(), bootDiskType: z.unknown().describe( - 'Optional. Type of the boot disk (default is "pd-standard"). Valid values: "pd-balanced" (Persistent Disk Balanced Solid State Drive), "pd-ssd" (Persistent Disk Solid State Drive), or "pd-standard" (Persistent Disk Hard Disk Drive). See Disk types (https://cloud.google.com/compute/docs/disks#disk-types).', + "Optional. Type of the boot disk (default is pd-standard). Valid values: pd-balanced (Persistent Disk Balanced Solid State Drive), pd-ssd (Persistent Disk Solid State Drive), or pd-standard (Persistent Disk Hard Disk Drive). See Disk types (https://cloud.google.com/compute/docs/disks#disk-types).", ).optional(), localSsdInterface: z.unknown().describe( - 'Optional. Interface type of local SSDs (default is "scsi"). Valid values: "scsi" (Small Computer System Interface), "nvme" (Non-Volatile Memory Express). See local SSD performance (https://cloud.google.com/compute/docs/disks/local-ssd#performance).', + "Optional. Interface type of local SSDs (default is scsi). Valid values: scsi (Small Computer System Interface), nvme (Non-Volatile Memory Express). See local SSD performance (https://cloud.google.com/compute/docs/disks/local-ssd#performance).", ).optional(), numLocalSsds: z.unknown().describe( "Optional. Number of attached SSDs, from 0 to 8 (default is 0). 
If SSDs are not attached, the boot disk is used to store runtime logs and HDFS (https://hadoop.apache.org/docs/r1.2.1/hdfs_user_guide.html) data. If one or more SSDs are attached, this runtime bulk data is spread across them, and the boot disk contains only basic config and installed binaries.Note: Local SSD options may vary by machine type and number of vCPUs selected.", @@ -1791,7 +1791,7 @@ const InputsSchema = z.object({ "ZERO_SCALE", ]).describe("Optional. The type of the cluster.").optional(), configBucket: z.string().describe( - "Optional. A Cloud Storage bucket used to stage job dependencies, config files, and job driver console output. If you do not specify a staging bucket, Cloud Dataproc will determine a Cloud Storage location (US, ASIA, or EU) for your cluster's staging bucket according to the Compute Engine zone where your cluster is deployed, and then create and manage this project-level, per-location bucket (see Dataproc staging and temp buckets (https://cloud.google.com/dataproc/docs/concepts/configuring-clusters/staging-bucket)). This field requires a Cloud Storage bucket name, not a gs://... URI to a Cloud Storage bucket.", + "Optional. A Cloud Storage bucket used to stage job dependencies, config files, and job driver console output. If you do not specify a staging bucket, Dataproc determines a Cloud Storage location (US, ASIA, or EU) for the cluster staging bucket according to the Compute Engine zone where the cluster is deployed, and then creates and manages this project-level, per-location bucket (see Dataproc staging and temp buckets (https://cloud.google.com/dataproc/docs/concepts/configuring-clusters/staging-bucket)). This field requires a Cloud Storage bucket name, not a gs://... 
URI to a Cloud Storage bucket.", ).optional(), dataprocMetricConfig: z.object({ metrics: z.array(z.unknown()).describe( @@ -1826,7 +1826,7 @@ const InputsSchema = z.object({ ).optional(), confidentialInstanceConfig: z.object({ enableConfidentialCompute: z.unknown().describe( - "Optional. Defines whether the instance should have confidential compute enabled.", + "Optional. Deprecated: Use 'confidential_instance_type' instead. Defines whether the instance should have confidential compute enabled.", ).optional(), }).describe( "Confidential Instance Config for clusters using Confidential VMs (https://cloud.google.com/compute/confidential-vm/docs)", @@ -1838,7 +1838,7 @@ const InputsSchema = z.object({ "Optional. The Compute Engine metadata entries to add to all instances (see Project and instance metadata (https://cloud.google.com/compute/docs/storing-retrieving-metadata#project_and_instance_metadata)).", ).optional(), networkUri: z.string().describe( - 'Optional. The Compute Engine network to be used for machine communications. Cannot be specified with subnetwork_uri. If neither network_uri nor subnetwork_uri is specified, the "default" network of the project is used, if it exists. Cannot be a "Custom Subnet Network" (see Using Subnetworks (https://cloud.google.com/compute/docs/subnetworks) for more information).A full URL, partial URI, or short name are valid. Examples: https://www.googleapis.com/compute/v1/projects/[project_id]/global/networks/default projects/[project_id]/global/networks/default default', + 'Optional. The Compute Engine network to be used for machine communications. Cannot be specified with subnetwork_uri. If neither network_uri nor subnetwork_uri is specified, the "default" network of the project is used, if it exists. Cannot be a Custom Subnet Network (see Using Subnetworks (https://cloud.google.com/compute/docs/subnetworks) for more information).A full URL, partial URI, or short name are valid. 
Examples: https://www.googleapis.com/compute/v1/projects/[project_id]/global/networks/default projects/[project_id]/global/networks/default default', ).optional(), nodeGroupAffinity: z.object({ nodeGroupUri: z.unknown().describe( @@ -1970,10 +1970,10 @@ const InputsSchema = z.object({ "Optional. Size in GB of the boot disk (default is 500GB).", ).optional(), bootDiskType: z.unknown().describe( - 'Optional. Type of the boot disk (default is "pd-standard"). Valid values: "pd-balanced" (Persistent Disk Balanced Solid State Drive), "pd-ssd" (Persistent Disk Solid State Drive), or "pd-standard" (Persistent Disk Hard Disk Drive). See Disk types (https://cloud.google.com/compute/docs/disks#disk-types).', + "Optional. Type of the boot disk (default is pd-standard). Valid values: pd-balanced (Persistent Disk Balanced Solid State Drive), pd-ssd (Persistent Disk Solid State Drive), or pd-standard (Persistent Disk Hard Disk Drive). See Disk types (https://cloud.google.com/compute/docs/disks#disk-types).", ).optional(), localSsdInterface: z.unknown().describe( - 'Optional. Interface type of local SSDs (default is "scsi"). Valid values: "scsi" (Small Computer System Interface), "nvme" (Non-Volatile Memory Express). See local SSD performance (https://cloud.google.com/compute/docs/disks/local-ssd#performance).', + "Optional. Interface type of local SSDs (default is scsi). Valid values: scsi (Small Computer System Interface), nvme (Non-Volatile Memory Express). See local SSD performance (https://cloud.google.com/compute/docs/disks/local-ssd#performance).", ).optional(), numLocalSsds: z.unknown().describe( "Optional. Number of attached SSDs, from 0 to 8 (default is 0). If SSDs are not attached, the boot disk is used to store runtime logs and HDFS (https://hadoop.apache.org/docs/r1.2.1/hdfs_user_guide.html) data. 
If one or more SSDs are attached, this runtime bulk data is spread across them, and the boot disk contains only basic config and installed binaries.Note: Local SSD options may vary by machine type and number of vCPUs selected.", @@ -2075,10 +2075,10 @@ const InputsSchema = z.object({ "Optional. Size in GB of the boot disk (default is 500GB).", ).optional(), bootDiskType: z.unknown().describe( - 'Optional. Type of the boot disk (default is "pd-standard"). Valid values: "pd-balanced" (Persistent Disk Balanced Solid State Drive), "pd-ssd" (Persistent Disk Solid State Drive), or "pd-standard" (Persistent Disk Hard Disk Drive). See Disk types (https://cloud.google.com/compute/docs/disks#disk-types).', + "Optional. Type of the boot disk (default is pd-standard). Valid values: pd-balanced (Persistent Disk Balanced Solid State Drive), pd-ssd (Persistent Disk Solid State Drive), or pd-standard (Persistent Disk Hard Disk Drive). See Disk types (https://cloud.google.com/compute/docs/disks#disk-types).", ).optional(), localSsdInterface: z.unknown().describe( - 'Optional. Interface type of local SSDs (default is "scsi"). Valid values: "scsi" (Small Computer System Interface), "nvme" (Non-Volatile Memory Express). See local SSD performance (https://cloud.google.com/compute/docs/disks/local-ssd#performance).', + "Optional. Interface type of local SSDs (default is scsi). Valid values: scsi (Small Computer System Interface), nvme (Non-Volatile Memory Express). See local SSD performance (https://cloud.google.com/compute/docs/disks/local-ssd#performance).", ).optional(), numLocalSsds: z.unknown().describe( "Optional. Number of attached SSDs, from 0 to 8 (default is 0). If SSDs are not attached, the boot disk is used to store runtime logs and HDFS (https://hadoop.apache.org/docs/r1.2.1/hdfs_user_guide.html) data. 
If one or more SSDs are attached, this runtime bulk data is spread across them, and the boot disk contains only basic config and installed binaries.Note: Local SSD options may vary by machine type and number of vCPUs selected.", @@ -2229,7 +2229,7 @@ const InputsSchema = z.object({ "Specifies the selection and config of software inside the cluster.", ).optional(), tempBucket: z.string().describe( - "Optional. A Cloud Storage bucket used to store ephemeral cluster and jobs data, such as Spark and MapReduce history files. If you do not specify a temp bucket, Dataproc will determine a Cloud Storage location (US, ASIA, or EU) for your cluster's temp bucket according to the Compute Engine zone where your cluster is deployed, and then create and manage this project-level, per-location bucket. The default bucket has a TTL of 90 days, but you can use any TTL (or none) if you specify a bucket (see Dataproc staging and temp buckets (https://cloud.google.com/dataproc/docs/concepts/configuring-clusters/staging-bucket)). This field requires a Cloud Storage bucket name, not a gs://... URI to a Cloud Storage bucket.", + "Optional. A Cloud Storage bucket used to store ephemeral cluster and jobs data, such as Spark and MapReduce history files. If you do not specify a temp bucket, Dataproc determines a Cloud Storage location (US, ASIA, or EU) for the cluster temp bucket according to the Compute Engine zone where the cluster is deployed, and then creates and manages this project-level, per-location bucket. The default bucket has a TTL of 90 days, but you can use any TTL (or none) if you specify a bucket (see Dataproc staging and temp buckets (https://cloud.google.com/dataproc/docs/concepts/configuring-clusters/staging-bucket)). This field requires a Cloud Storage bucket name, not a gs://... URI to a Cloud Storage bucket.", ).optional(), workerConfig: z.object({ accelerators: z.array(z.unknown()).describe( @@ -2249,10 +2249,10 @@ const InputsSchema = z.object({ "Optional. 
Size in GB of the boot disk (default is 500GB).", ).optional(), bootDiskType: z.unknown().describe( - 'Optional. Type of the boot disk (default is "pd-standard"). Valid values: "pd-balanced" (Persistent Disk Balanced Solid State Drive), "pd-ssd" (Persistent Disk Solid State Drive), or "pd-standard" (Persistent Disk Hard Disk Drive). See Disk types (https://cloud.google.com/compute/docs/disks#disk-types).', + "Optional. Type of the boot disk (default is pd-standard). Valid values: pd-balanced (Persistent Disk Balanced Solid State Drive), pd-ssd (Persistent Disk Solid State Drive), or pd-standard (Persistent Disk Hard Disk Drive). See Disk types (https://cloud.google.com/compute/docs/disks#disk-types).", ).optional(), localSsdInterface: z.unknown().describe( - 'Optional. Interface type of local SSDs (default is "scsi"). Valid values: "scsi" (Small Computer System Interface), "nvme" (Non-Volatile Memory Express). See local SSD performance (https://cloud.google.com/compute/docs/disks/local-ssd#performance).', + "Optional. Interface type of local SSDs (default is scsi). Valid values: scsi (Small Computer System Interface), nvme (Non-Volatile Memory Express). See local SSD performance (https://cloud.google.com/compute/docs/disks/local-ssd#performance).", ).optional(), numLocalSsds: z.unknown().describe( "Optional. Number of attached SSDs, from 0 to 8 (default is 0). If SSDs are not attached, the boot disk is used to store runtime logs and HDFS (https://hadoop.apache.org/docs/r1.2.1/hdfs_user_guide.html) data. If one or more SSDs are attached, this runtime bulk data is spread across them, and the boot disk contains only basic config and installed binaries.Note: Local SSD options may vary by machine type and number of vCPUs selected.", @@ -2350,7 +2350,7 @@ const InputsSchema = z.object({ /** Swamp extension model for Google Cloud Dataproc WorkflowTemplates. Registered at `@swamp/gcp/dataproc/workflowtemplates`. 
*/ export const model = { type: "@swamp/gcp/dataproc/workflowtemplates", - version: "2026.04.23.1", + version: "2026.05.06.1", upgrades: [ { toVersion: "2026.04.01.1", @@ -2397,6 +2397,11 @@ export const model = { description: "No schema changes", upgradeAttributes: (old: Record) => old, }, + { + toVersion: "2026.05.06.1", + description: "No schema changes", + upgradeAttributes: (old: Record) => old, + }, ], globalArguments: GlobalArgsSchema, inputsSchema: InputsSchema, diff --git a/model/gcp/dataproc/manifest.yaml b/model/gcp/dataproc/manifest.yaml index 73687377c..1865bdad9 100644 --- a/model/gcp/dataproc/manifest.yaml +++ b/model/gcp/dataproc/manifest.yaml @@ -1,7 +1,7 @@ # Auto-generated manifest. Re-generate with the appropriate deno task. manifestVersion: 1 name: "@swamp/gcp/dataproc" -version: "2026.05.01.1" +version: "2026.05.06.1" description: "Google Cloud dataproc infrastructure models" repository: "https://github.com/systeminit/swamp-extensions" labels: @@ -10,6 +10,8 @@ labels: - dataproc - cloud - infrastructure +releaseNotes: | + - Updated: workflowtemplates, clusters, clusters_nodegroups models: - autoscalingpolicies.ts - batches.ts diff --git a/model/gcp/firestore/manifest.yaml b/model/gcp/firestore/manifest.yaml index b7f4f1cee..5a1cbe25b 100644 --- a/model/gcp/firestore/manifest.yaml +++ b/model/gcp/firestore/manifest.yaml @@ -10,8 +10,6 @@ labels: - firestore - cloud - infrastructure -releaseNotes: | - - Updated: databases models: - backups.ts - databases.ts diff --git a/model/gcp/hypercomputecluster/extensions/models/clusters.ts b/model/gcp/hypercomputecluster/extensions/models/clusters.ts index 4924c6136..892ba0118 100644 --- a/model/gcp/hypercomputecluster/extensions/models/clusters.ts +++ b/model/gcp/hypercomputecluster/extensions/models/clusters.ts @@ -226,7 +226,7 @@ const GlobalArgsSchema = z.object({ loginNodes: z.object({ bootDisk: z.object({ sizeGb: z.string().describe( - "Required. Immutable. Size of the disk in gigabytes. 
Must be at least 10GB.", + "Required. Immutable. Size of the disk in gigabytes. Must be at least 40GB.", ).optional(), type: z.string().describe( "Required. Immutable. [Persistent disk type](https://cloud.google.com/compute/docs/disks#disk-types), in the format `projects/{project}/zones/{zone}/diskTypes/{disk_type}`.", @@ -638,7 +638,7 @@ const InputsSchema = z.object({ loginNodes: z.object({ bootDisk: z.object({ sizeGb: z.string().describe( - "Required. Immutable. Size of the disk in gigabytes. Must be at least 10GB.", + "Required. Immutable. Size of the disk in gigabytes. Must be at least 40GB.", ).optional(), type: z.string().describe( "Required. Immutable. [Persistent disk type](https://cloud.google.com/compute/docs/disks#disk-types), in the format `projects/{project}/zones/{zone}/diskTypes/{disk_type}`.", @@ -876,7 +876,7 @@ const InputsSchema = z.object({ /** Swamp extension model for Google Cloud Cluster Director Clusters. Registered at `@swamp/gcp/hypercomputecluster/clusters`. */ export const model = { type: "@swamp/gcp/hypercomputecluster/clusters", - version: "2026.05.01.1", + version: "2026.05.06.1", upgrades: [ { toVersion: "2026.04.01.1", @@ -923,6 +923,11 @@ export const model = { description: "No schema changes", upgradeAttributes: (old: Record) => old, }, + { + toVersion: "2026.05.06.1", + description: "No schema changes", + upgradeAttributes: (old: Record) => old, + }, ], globalArguments: GlobalArgsSchema, inputsSchema: InputsSchema, diff --git a/model/gcp/hypercomputecluster/manifest.yaml b/model/gcp/hypercomputecluster/manifest.yaml index ec263c830..736d4bd45 100644 --- a/model/gcp/hypercomputecluster/manifest.yaml +++ b/model/gcp/hypercomputecluster/manifest.yaml @@ -1,7 +1,7 @@ # Auto-generated manifest. Re-generate with the appropriate deno task. 
manifestVersion: 1 name: "@swamp/gcp/hypercomputecluster" -version: "2026.05.01.1" +version: "2026.05.06.1" description: "Google Cloud hypercomputecluster infrastructure models" repository: "https://github.com/systeminit/swamp-extensions" labels: @@ -10,6 +10,8 @@ labels: - hypercomputecluster - cloud - infrastructure +releaseNotes: | + - Updated: clusters models: - clusters.ts - locations.ts diff --git a/model/gcp/logging/extensions/models/savedqueries.ts b/model/gcp/logging/extensions/models/savedqueries.ts index 1142caabf..c340e9cb5 100644 --- a/model/gcp/logging/extensions/models/savedqueries.ts +++ b/model/gcp/logging/extensions/models/savedqueries.ts @@ -153,7 +153,7 @@ const GlobalArgsSchema = z.object({ "The cast for the field. This can any SQL cast type. Examples: - STRING - CHAR - DATE - TIMESTAMP - DATETIME - INT - FLOAT", ).optional(), field: z.unknown().describe( - "The field name. This will be the field that is selected using the dot notation to display the drill down value.", + "Optional. The field name. This will be the field that is selected using the dot notation to display the drill down value.", ).optional(), operation: z.unknown().describe( "Specifies the role of this field (direct selection, grouping, or aggregation).", @@ -168,7 +168,7 @@ const GlobalArgsSchema = z.object({ "The truncation granularity when grouping by a time/date field. This will be used to truncate the field to the granularity specified. This can be either a date or a time granularity found at https://cloud.google.com/bigquery/docs/reference/standard-sql/timestamp_functions#timestamp_trunc_granularity_date and https://cloud.google.com/bigquery/docs/reference/standard-sql/timestamp_functions#timestamp_trunc_granularity_time respectively.", ).optional(), }).describe( - "Represents a field selected in the query, analogous to an item in a SQL SELECT clause. 
It specifies the source field and optionally applies transformations like aggregation, casting, regex extraction, or assigns an alias. Use ProjectedField when you need more than just the raw source field name (for which you might use FieldSource directly in QueryBuilderConfig's field_sources list if no transformations or specific operation type are needed).", + "Represents a field selected in the query, analogous to an item in a SQL SELECT clause. It specifies the source field and optionally applies transformations like aggregation, casting, regex extraction, or assigns an alias. Use ProjectedField when you need more than just the raw source field name (for which you might use FieldSource directly in QueryBuilderConfig's field_sources list if no transformations or specific operation type are needed).A ProjectedField can represent either a field present in the data schema (specified via the field property) or a virtual field that is computed from other fields (specified via the virtual_field property).", ).optional(), })).describe( "Defines the items to include in the query result, analogous to a SQL SELECT clause.", @@ -207,10 +207,10 @@ const GlobalArgsSchema = z.object({ "The dot-delimited path of the parent container that holds the target field.This path defines the structural hierarchy and is essential for correctly generating SQL when field keys contain special characters (e.g., dots or brackets).Example: json_payload.labels (This points to the 'labels' object). This is an empty string if the target field is at the root level.", ).optional(), projectedField: z.unknown().describe( - "Represents a field selected in the query, analogous to an item in a SQL SELECT clause. It specifies the source field and optionally applies transformations like aggregation, casting, regex extraction, or assigns an alias. 
Use ProjectedField when you need more than just the raw source field name (for which you might use FieldSource directly in QueryBuilderConfig's field_sources list if no transformations or specific operation type are needed).", + "Represents a field selected in the query, analogous to an item in a SQL SELECT clause. It specifies the source field and optionally applies transformations like aggregation, casting, regex extraction, or assigns an alias. Use ProjectedField when you need more than just the raw source field name (for which you might use FieldSource directly in QueryBuilderConfig's field_sources list if no transformations or specific operation type are needed).A ProjectedField can represent either a field present in the data schema (specified via the field property) or a virtual field that is computed from other fields (specified via the virtual_field property).", ).optional(), }).describe( - "A source that can be used to represent a field within various parts of a structured query, such as in SELECT, WHERE, or ORDER BY clauses.", + 'A source that can be used to represent a "field of data" within various parts of a structured query, such as in SELECT, WHERE, or ORDER BY clauses. The term "field of data" is used here because it is not limited to literal fields in the underlying data schema.', ).optional(), fieldSourceValue: z.object({ aliasRef: z.unknown().describe( @@ -229,10 +229,10 @@ const GlobalArgsSchema = z.object({ "The dot-delimited path of the parent container that holds the target field.This path defines the structural hierarchy and is essential for correctly generating SQL when field keys contain special characters (e.g., dots or brackets).Example: json_payload.labels (This points to the 'labels' object). This is an empty string if the target field is at the root level.", ).optional(), projectedField: z.unknown().describe( - "Represents a field selected in the query, analogous to an item in a SQL SELECT clause. 
It specifies the source field and optionally applies transformations like aggregation, casting, regex extraction, or assigns an alias. Use ProjectedField when you need more than just the raw source field name (for which you might use FieldSource directly in QueryBuilderConfig's field_sources list if no transformations or specific operation type are needed).", + "Represents a field selected in the query, analogous to an item in a SQL SELECT clause. It specifies the source field and optionally applies transformations like aggregation, casting, regex extraction, or assigns an alias. Use ProjectedField when you need more than just the raw source field name (for which you might use FieldSource directly in QueryBuilderConfig's field_sources list if no transformations or specific operation type are needed).A ProjectedField can represent either a field present in the data schema (specified via the field property) or a virtual field that is computed from other fields (specified via the virtual_field property).", ).optional(), }).describe( - "A source that can be used to represent a field within various parts of a structured query, such as in SELECT, WHERE, or ORDER BY clauses.", + 'A source that can be used to represent a "field of data" within various parts of a structured query, such as in SELECT, WHERE, or ORDER BY clauses. The term "field of data" is used here because it is not limited to literal fields in the underlying data schema.', ).optional(), isNegation: z.boolean().describe( "Determines if the NOT flag should be added to the comparator.", @@ -271,10 +271,10 @@ const GlobalArgsSchema = z.object({ "The dot-delimited path of the parent container that holds the target field.This path defines the structural hierarchy and is essential for correctly generating SQL when field keys contain special characters (e.g., dots or brackets).Example: json_payload.labels (This points to the 'labels' object). 
This is an empty string if the target field is at the root level.", ).optional(), projectedField: z.unknown().describe( - "Represents a field selected in the query, analogous to an item in a SQL SELECT clause. It specifies the source field and optionally applies transformations like aggregation, casting, regex extraction, or assigns an alias. Use ProjectedField when you need more than just the raw source field name (for which you might use FieldSource directly in QueryBuilderConfig's field_sources list if no transformations or specific operation type are needed).", + "Represents a field selected in the query, analogous to an item in a SQL SELECT clause. It specifies the source field and optionally applies transformations like aggregation, casting, regex extraction, or assigns an alias. Use ProjectedField when you need more than just the raw source field name (for which you might use FieldSource directly in QueryBuilderConfig's field_sources list if no transformations or specific operation type are needed).A ProjectedField can represent either a field present in the data schema (specified via the field property) or a virtual field that is computed from other fields (specified via the virtual_field property).", ).optional(), }).describe( - "A source that can be used to represent a field within various parts of a structured query, such as in SELECT, WHERE, or ORDER BY clauses.", + 'A source that can be used to represent a "field of data" within various parts of a structured query, such as in SELECT, WHERE, or ORDER BY clauses. The term "field of data" is used here because it is not limited to literal fields in the underlying data schema.', ).optional(), sortOrderDirection: z.enum([ "SORT_ORDER_UNSPECIFIED", @@ -442,7 +442,7 @@ const InputsSchema = z.object({ "The cast for the field. This can any SQL cast type. Examples: - STRING - CHAR - DATE - TIMESTAMP - DATETIME - INT - FLOAT", ).optional(), field: z.unknown().describe( - "The field name. 
This will be the field that is selected using the dot notation to display the drill down value.", + "Optional. The field name. This will be the field that is selected using the dot notation to display the drill down value.", ).optional(), operation: z.unknown().describe( "Specifies the role of this field (direct selection, grouping, or aggregation).", @@ -457,7 +457,7 @@ const InputsSchema = z.object({ "The truncation granularity when grouping by a time/date field. This will be used to truncate the field to the granularity specified. This can be either a date or a time granularity found at https://cloud.google.com/bigquery/docs/reference/standard-sql/timestamp_functions#timestamp_trunc_granularity_date and https://cloud.google.com/bigquery/docs/reference/standard-sql/timestamp_functions#timestamp_trunc_granularity_time respectively.", ).optional(), }).describe( - "Represents a field selected in the query, analogous to an item in a SQL SELECT clause. It specifies the source field and optionally applies transformations like aggregation, casting, regex extraction, or assigns an alias. Use ProjectedField when you need more than just the raw source field name (for which you might use FieldSource directly in QueryBuilderConfig's field_sources list if no transformations or specific operation type are needed).", + "Represents a field selected in the query, analogous to an item in a SQL SELECT clause. It specifies the source field and optionally applies transformations like aggregation, casting, regex extraction, or assigns an alias. 
Use ProjectedField when you need more than just the raw source field name (for which you might use FieldSource directly in QueryBuilderConfig's field_sources list if no transformations or specific operation type are needed).A ProjectedField can represent either a field present in the data schema (specified via the field property) or a virtual field that is computed from other fields (specified via the virtual_field property).", ).optional(), })).describe( "Defines the items to include in the query result, analogous to a SQL SELECT clause.", @@ -496,10 +496,10 @@ const InputsSchema = z.object({ "The dot-delimited path of the parent container that holds the target field.This path defines the structural hierarchy and is essential for correctly generating SQL when field keys contain special characters (e.g., dots or brackets).Example: json_payload.labels (This points to the 'labels' object). This is an empty string if the target field is at the root level.", ).optional(), projectedField: z.unknown().describe( - "Represents a field selected in the query, analogous to an item in a SQL SELECT clause. It specifies the source field and optionally applies transformations like aggregation, casting, regex extraction, or assigns an alias. Use ProjectedField when you need more than just the raw source field name (for which you might use FieldSource directly in QueryBuilderConfig's field_sources list if no transformations or specific operation type are needed).", + "Represents a field selected in the query, analogous to an item in a SQL SELECT clause. It specifies the source field and optionally applies transformations like aggregation, casting, regex extraction, or assigns an alias. 
Use ProjectedField when you need more than just the raw source field name (for which you might use FieldSource directly in QueryBuilderConfig's field_sources list if no transformations or specific operation type are needed).A ProjectedField can represent either a field present in the data schema (specified via the field property) or a virtual field that is computed from other fields (specified via the virtual_field property).", ).optional(), }).describe( - "A source that can be used to represent a field within various parts of a structured query, such as in SELECT, WHERE, or ORDER BY clauses.", + 'A source that can be used to represent a "field of data" within various parts of a structured query, such as in SELECT, WHERE, or ORDER BY clauses. The term "field of data" is used here because it is not limited to literal fields in the underlying data schema.', ).optional(), fieldSourceValue: z.object({ aliasRef: z.unknown().describe( @@ -518,10 +518,10 @@ const InputsSchema = z.object({ "The dot-delimited path of the parent container that holds the target field.This path defines the structural hierarchy and is essential for correctly generating SQL when field keys contain special characters (e.g., dots or brackets).Example: json_payload.labels (This points to the 'labels' object). This is an empty string if the target field is at the root level.", ).optional(), projectedField: z.unknown().describe( - "Represents a field selected in the query, analogous to an item in a SQL SELECT clause. It specifies the source field and optionally applies transformations like aggregation, casting, regex extraction, or assigns an alias. Use ProjectedField when you need more than just the raw source field name (for which you might use FieldSource directly in QueryBuilderConfig's field_sources list if no transformations or specific operation type are needed).", + "Represents a field selected in the query, analogous to an item in a SQL SELECT clause. 
It specifies the source field and optionally applies transformations like aggregation, casting, regex extraction, or assigns an alias. Use ProjectedField when you need more than just the raw source field name (for which you might use FieldSource directly in QueryBuilderConfig's field_sources list if no transformations or specific operation type are needed).A ProjectedField can represent either a field present in the data schema (specified via the field property) or a virtual field that is computed from other fields (specified via the virtual_field property).", ).optional(), }).describe( - "A source that can be used to represent a field within various parts of a structured query, such as in SELECT, WHERE, or ORDER BY clauses.", + 'A source that can be used to represent a "field of data" within various parts of a structured query, such as in SELECT, WHERE, or ORDER BY clauses. The term "field of data" is used here because it is not limited to literal fields in the underlying data schema.', ).optional(), isNegation: z.boolean().describe( "Determines if the NOT flag should be added to the comparator.", @@ -560,10 +560,10 @@ const InputsSchema = z.object({ "The dot-delimited path of the parent container that holds the target field.This path defines the structural hierarchy and is essential for correctly generating SQL when field keys contain special characters (e.g., dots or brackets).Example: json_payload.labels (This points to the 'labels' object). This is an empty string if the target field is at the root level.", ).optional(), projectedField: z.unknown().describe( - "Represents a field selected in the query, analogous to an item in a SQL SELECT clause. It specifies the source field and optionally applies transformations like aggregation, casting, regex extraction, or assigns an alias. 
Use ProjectedField when you need more than just the raw source field name (for which you might use FieldSource directly in QueryBuilderConfig's field_sources list if no transformations or specific operation type are needed).", + "Represents a field selected in the query, analogous to an item in a SQL SELECT clause. It specifies the source field and optionally applies transformations like aggregation, casting, regex extraction, or assigns an alias. Use ProjectedField when you need more than just the raw source field name (for which you might use FieldSource directly in QueryBuilderConfig's field_sources list if no transformations or specific operation type are needed).A ProjectedField can represent either a field present in the data schema (specified via the field property) or a virtual field that is computed from other fields (specified via the virtual_field property).", ).optional(), }).describe( - "A source that can be used to represent a field within various parts of a structured query, such as in SELECT, WHERE, or ORDER BY clauses.", + 'A source that can be used to represent a "field of data" within various parts of a structured query, such as in SELECT, WHERE, or ORDER BY clauses. The term "field of data" is used here because it is not limited to literal fields in the underlying data schema.', ).optional(), sortOrderDirection: z.enum([ "SORT_ORDER_UNSPECIFIED", @@ -601,7 +601,7 @@ const InputsSchema = z.object({ /** Swamp extension model for Google Cloud Logging SavedQueries. Registered at `@swamp/gcp/logging/savedqueries`. 
*/ export const model = { type: "@swamp/gcp/logging/savedqueries", - version: "2026.04.23.1", + version: "2026.05.06.1", upgrades: [ { toVersion: "2026.04.01.1", @@ -638,6 +638,11 @@ export const model = { description: "No schema changes", upgradeAttributes: (old: Record) => old, }, + { + toVersion: "2026.05.06.1", + description: "No schema changes", + upgradeAttributes: (old: Record) => old, + }, ], globalArguments: GlobalArgsSchema, inputsSchema: InputsSchema, diff --git a/model/gcp/logging/manifest.yaml b/model/gcp/logging/manifest.yaml index 303dded86..e94d4c5f8 100644 --- a/model/gcp/logging/manifest.yaml +++ b/model/gcp/logging/manifest.yaml @@ -1,7 +1,7 @@ # Auto-generated manifest. Re-generate with the appropriate deno task. manifestVersion: 1 name: "@swamp/gcp/logging" -version: "2026.04.23.1" +version: "2026.05.06.1" description: "Google Cloud logging infrastructure models" repository: "https://github.com/systeminit/swamp-extensions" labels: @@ -10,6 +10,8 @@ labels: - logging - cloud - infrastructure +releaseNotes: | + - Updated: savedqueries models: - buckets.ts - buckets_links.ts diff --git a/model/gcp/ondemandscanning/manifest.yaml b/model/gcp/ondemandscanning/manifest.yaml index d44a66fe1..bcaea782f 100644 --- a/model/gcp/ondemandscanning/manifest.yaml +++ b/model/gcp/ondemandscanning/manifest.yaml @@ -10,8 +10,6 @@ labels: - ondemandscanning - cloud - infrastructure -releaseNotes: | - - Updated: scans_vulnerabilities models: - scans_vulnerabilities.ts additionalFiles: diff --git a/model/gcp/run/extensions/models/instances.ts b/model/gcp/run/extensions/models/instances.ts index 952ad13f3..7ff8cbe76 100644 --- a/model/gcp/run/extensions/models/instances.ts +++ b/model/gcp/run/extensions/models/instances.ts @@ -364,6 +364,9 @@ const GlobalArgsSchema = z.object({ })).describe( "Required. Holds the single container that defines the unit of execution for this Instance.", ).optional(), + defaultUriDisabled: z.boolean().describe( + "Optional. 
Disables public resolution of the default URI of this Instance.", + ).optional(), description: z.string().describe( "User-provided description of the Instance. This field currently has a 512-character limit.", ).optional(), @@ -709,6 +712,7 @@ const StateSchema = z.object({ })).optional(), createTime: z.string().optional(), creator: z.string().optional(), + defaultUriDisabled: z.boolean().optional(), deleteTime: z.string().optional(), description: z.string().optional(), encryptionKey: z.string().optional(), @@ -1041,6 +1045,9 @@ const InputsSchema = z.object({ })).describe( "Required. Holds the single container that defines the unit of execution for this Instance.", ).optional(), + defaultUriDisabled: z.boolean().describe( + "Optional. Disables public resolution of the default URI of this Instance.", + ).optional(), description: z.string().describe( "User-provided description of the Instance. This field currently has a 512-character limit.", ).optional(), @@ -1266,7 +1273,7 @@ const InputsSchema = z.object({ /** Swamp extension model for Google Cloud Run Admin Instances. Registered at `@swamp/gcp/run/instances`. 
*/ export const model = { type: "@swamp/gcp/run/instances", - version: "2026.04.23.1", + version: "2026.05.06.1", upgrades: [ { toVersion: "2026.04.01.1", @@ -1308,6 +1315,11 @@ export const model = { description: "No schema changes", upgradeAttributes: (old: Record) => old, }, + { + toVersion: "2026.05.06.1", + description: "Added: defaultUriDisabled", + upgradeAttributes: (old: Record) => old, + }, ], globalArguments: GlobalArgsSchema, inputsSchema: InputsSchema, @@ -1341,6 +1353,9 @@ export const model = { body["clientVersion"] = g["clientVersion"]; } if (g["containers"] !== undefined) body["containers"] = g["containers"]; + if (g["defaultUriDisabled"] !== undefined) { + body["defaultUriDisabled"] = g["defaultUriDisabled"]; + } if (g["description"] !== undefined) { body["description"] = g["description"]; } @@ -1471,6 +1486,9 @@ export const model = { body["clientVersion"] = g["clientVersion"]; } if (g["containers"] !== undefined) body["containers"] = g["containers"]; + if (g["defaultUriDisabled"] !== undefined) { + body["defaultUriDisabled"] = g["defaultUriDisabled"]; + } if (g["description"] !== undefined) { body["description"] = g["description"]; } diff --git a/model/gcp/run/manifest.yaml b/model/gcp/run/manifest.yaml index 698c95dc5..78456dd78 100644 --- a/model/gcp/run/manifest.yaml +++ b/model/gcp/run/manifest.yaml @@ -1,7 +1,7 @@ # Auto-generated manifest. Re-generate with the appropriate deno task. 
manifestVersion: 1 name: "@swamp/gcp/run" -version: "2026.04.23.1" +version: "2026.05.06.1" description: "Google Cloud run infrastructure models" repository: "https://github.com/systeminit/swamp-extensions" labels: @@ -10,6 +10,8 @@ labels: - run - cloud - infrastructure +releaseNotes: | + - Updated: instances models: - instances.ts - jobs.ts diff --git a/model/gcp/sqladmin/extensions/models/instances.ts b/model/gcp/sqladmin/extensions/models/instances.ts index ebb57cf17..c70c3635d 100644 --- a/model/gcp/sqladmin/extensions/models/instances.ts +++ b/model/gcp/sqladmin/extensions/models/instances.ts @@ -169,6 +169,7 @@ const GlobalArgsSchema = z.object({ "POSTGRES_16", "POSTGRES_17", "POSTGRES_18", + "POSTGRES_19", "SQLSERVER_2019_STANDARD", "SQLSERVER_2019_ENTERPRISE", "SQLSERVER_2019_EXPRESS", @@ -837,24 +838,24 @@ const GlobalArgsSchema = z.object({ ).optional(), performanceCaptureConfig: z.object({ enabled: z.boolean().describe( - "Optional. Enable or disable the Performance Capture feature.", + "Optional. Enables or disables the performance capture feature.", ).optional(), probeThreshold: z.number().int().describe( - "Optional. The minimum number of consecutive readings above threshold that triggers instance state capture.", + "Optional. Specifies the minimum number of consecutive probe threshold that triggers performance capture.", ).optional(), probingIntervalSeconds: z.number().int().describe( - "Optional. The time interval in seconds between any two probes.", + "Optional. Specifies the interval in seconds between consecutive probes that check if any trigger condition thresholds have been reached.", ).optional(), runningThreadsThreshold: z.number().int().describe( - "Optional. The minimum number of server threads running to trigger the capture on primary.", + "Optional. 
Specifies the minimum number of MySQL `Threads_running` to trigger the performance capture on the primary instance.", ).optional(), secondsBehindSourceThreshold: z.number().int().describe( - "Optional. The minimum number of seconds replica must be lagging behind primary to trigger capture on replica.", + "Optional. Specifies the minimum number of seconds replica must be lagging behind primary instance to trigger the performance capture on replica.", ).optional(), transactionDurationThreshold: z.number().int().describe( - "Optional. The amount of time in seconds that a transaction needs to have been open before the watcher starts recording it.", + "Optional. Specifies the amount of time in seconds that a transaction needs to have been open before the watcher starts recording it.", ).optional(), - }).describe("Performance Capture configuration.").optional(), + }).describe("Performance capture configuration.").optional(), pricingPlan: z.enum(["SQL_PRICING_PLAN_UNSPECIFIED", "PACKAGE", "PER_USE"]) .describe( "The pricing plan for this instance. This can be either `PER_USE` or `PACKAGE`. Only `PER_USE` is supported for Second Generation instances.", @@ -956,6 +957,7 @@ const GlobalArgsSchema = z.object({ "LEGAL_ISSUE", "OPERATIONAL_ISSUE", "KMS_KEY_ISSUE", + "PROJECT_ABUSE", ]), ).describe( "If the instance state is SUSPENDED, the reason for the suspension.", @@ -1363,6 +1365,7 @@ const InputsSchema = z.object({ "POSTGRES_16", "POSTGRES_17", "POSTGRES_18", + "POSTGRES_19", "SQLSERVER_2019_STANDARD", "SQLSERVER_2019_ENTERPRISE", "SQLSERVER_2019_EXPRESS", @@ -2031,24 +2034,24 @@ const InputsSchema = z.object({ ).optional(), performanceCaptureConfig: z.object({ enabled: z.boolean().describe( - "Optional. Enable or disable the Performance Capture feature.", + "Optional. Enables or disables the performance capture feature.", ).optional(), probeThreshold: z.number().int().describe( - "Optional. 
The minimum number of consecutive readings above threshold that triggers instance state capture.", + "Optional. Specifies the minimum number of consecutive probe threshold that triggers performance capture.", ).optional(), probingIntervalSeconds: z.number().int().describe( - "Optional. The time interval in seconds between any two probes.", + "Optional. Specifies the interval in seconds between consecutive probes that check if any trigger condition thresholds have been reached.", ).optional(), runningThreadsThreshold: z.number().int().describe( - "Optional. The minimum number of server threads running to trigger the capture on primary.", + "Optional. Specifies the minimum number of MySQL `Threads_running` to trigger the performance capture on the primary instance.", ).optional(), secondsBehindSourceThreshold: z.number().int().describe( - "Optional. The minimum number of seconds replica must be lagging behind primary to trigger capture on replica.", + "Optional. Specifies the minimum number of seconds replica must be lagging behind primary instance to trigger the performance capture on replica.", ).optional(), transactionDurationThreshold: z.number().int().describe( - "Optional. The amount of time in seconds that a transaction needs to have been open before the watcher starts recording it.", + "Optional. Specifies the amount of time in seconds that a transaction needs to have been open before the watcher starts recording it.", ).optional(), - }).describe("Performance Capture configuration.").optional(), + }).describe("Performance capture configuration.").optional(), pricingPlan: z.enum(["SQL_PRICING_PLAN_UNSPECIFIED", "PACKAGE", "PER_USE"]) .describe( "The pricing plan for this instance. This can be either `PER_USE` or `PACKAGE`. 
Only `PER_USE` is supported for Second Generation instances.", @@ -2150,6 +2153,7 @@ const InputsSchema = z.object({ "LEGAL_ISSUE", "OPERATIONAL_ISSUE", "KMS_KEY_ISSUE", + "PROJECT_ABUSE", ]), ).describe( "If the instance state is SUSPENDED, the reason for the suspension.", @@ -2165,7 +2169,7 @@ const InputsSchema = z.object({ /** Swamp extension model for Google Cloud SQL Admin Instances. Registered at `@swamp/gcp/sqladmin/instances`. */ export const model = { type: "@swamp/gcp/sqladmin/instances", - version: "2026.04.23.1", + version: "2026.05.06.1", upgrades: [ { toVersion: "2026.04.01.1", @@ -2212,6 +2216,11 @@ export const model = { description: "No schema changes", upgradeAttributes: (old: Record) => old, }, + { + toVersion: "2026.05.06.1", + description: "No schema changes", + upgradeAttributes: (old: Record) => old, + }, ], globalArguments: GlobalArgsSchema, inputsSchema: InputsSchema, diff --git a/model/gcp/sqladmin/manifest.yaml b/model/gcp/sqladmin/manifest.yaml index 35df656b7..41caa3627 100644 --- a/model/gcp/sqladmin/manifest.yaml +++ b/model/gcp/sqladmin/manifest.yaml @@ -1,7 +1,7 @@ # Auto-generated manifest. Re-generate with the appropriate deno task. manifestVersion: 1 name: "@swamp/gcp/sqladmin" -version: "2026.04.23.1" +version: "2026.05.06.1" description: "Google Cloud sqladmin infrastructure models" repository: "https://github.com/systeminit/swamp-extensions" labels: @@ -10,6 +10,8 @@ labels: - sqladmin - cloud - infrastructure +releaseNotes: | + - Updated: instances models: - backupruns.ts - connect.ts diff --git a/model/gcp/storagebatchoperations/manifest.yaml b/model/gcp/storagebatchoperations/manifest.yaml index 6415c7a3e..39edca2e7 100644 --- a/model/gcp/storagebatchoperations/manifest.yaml +++ b/model/gcp/storagebatchoperations/manifest.yaml @@ -10,8 +10,6 @@ labels: - storagebatchoperations - cloud - infrastructure -releaseNotes: | - - Updated: jobs models: - jobs.ts - locations.ts